1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - shared or copy-on-write from a vnode/anonymous memory. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/param.h> 47 #include <sys/t_lock.h> 48 #include <sys/errno.h> 49 #include <sys/systm.h> 50 #include <sys/mman.h> 51 #include <sys/debug.h> 52 #include <sys/cred.h> 53 #include <sys/vmsystm.h> 54 #include <sys/tuneable.h> 55 #include <sys/bitmap.h> 56 #include <sys/swap.h> 57 #include <sys/kmem.h> 58 #include <sys/sysmacros.h> 59 #include <sys/vtrace.h> 60 #include <sys/cmn_err.h> 61 #include <sys/callb.h> 62 #include <sys/vm.h> 63 #include <sys/dumphdr.h> 64 #include <sys/lgrp.h> 65 66 #include <vm/hat.h> 67 #include <vm/as.h> 68 #include <vm/seg.h> 69 #include <vm/seg_vn.h> 70 #include <vm/pvn.h> 71 #include <vm/anon.h> 72 #include <vm/page.h> 73 #include <vm/vpage.h> 74 #include <sys/proc.h> 75 #include <sys/task.h> 76 #include <sys/project.h> 77 #include <sys/zone.h> 78 #include <sys/shm_impl.h> 79 /* 80 * Private seg op routines. 
81 */ 82 static int segvn_dup(struct seg *seg, struct seg *newseg); 83 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 84 static void segvn_free(struct seg *seg); 85 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 86 caddr_t addr, size_t len, enum fault_type type, 87 enum seg_rw rw); 88 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 89 static int segvn_setprot(struct seg *seg, caddr_t addr, 90 size_t len, uint_t prot); 91 static int segvn_checkprot(struct seg *seg, caddr_t addr, 92 size_t len, uint_t prot); 93 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 94 static size_t segvn_swapout(struct seg *seg); 95 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 96 int attr, uint_t flags); 97 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 98 char *vec); 99 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 100 int attr, int op, ulong_t *lockmap, size_t pos); 101 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 102 uint_t *protv); 103 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 104 static int segvn_gettype(struct seg *seg, caddr_t addr); 105 static int segvn_getvp(struct seg *seg, caddr_t addr, 106 struct vnode **vpp); 107 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 108 uint_t behav); 109 static void segvn_dump(struct seg *seg); 110 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 111 struct page ***ppp, enum lock_type type, enum seg_rw rw); 112 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 113 uint_t szc); 114 static int segvn_getmemid(struct seg *seg, caddr_t addr, 115 memid_t *memidp); 116 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 117 static int segvn_capable(struct seg *seg, segcapability_t capable); 118 119 struct seg_ops segvn_ops = { 120 segvn_dup, 121 segvn_unmap, 122 segvn_free, 123 segvn_fault, 124 segvn_faulta, 125 segvn_setprot, 126 segvn_checkprot, 127 segvn_kluster, 128 segvn_swapout, 129 segvn_sync, 130 segvn_incore, 131 segvn_lockop, 132 segvn_getprot, 133 segvn_getoffset, 134 segvn_gettype, 135 segvn_getvp, 136 segvn_advise, 137 segvn_dump, 138 segvn_pagelock, 139 segvn_setpagesize, 140 segvn_getmemid, 141 segvn_getpolicy, 142 segvn_capable, 143 }; 144 145 /* 146 * Common zfod structures, provided as a shorthand for others to use. 
147 */ 148 static segvn_crargs_t zfod_segvn_crargs = 149 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 150 static segvn_crargs_t kzfod_segvn_crargs = 151 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 152 PROT_ALL & ~PROT_USER); 153 static segvn_crargs_t stack_noexec_crargs = 154 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 155 156 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 157 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 158 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 159 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 160 161 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 162 163 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 164 165 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */ 166 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */ 167 uint_t segvn_pglock_comb_bshift; 168 size_t segvn_pglock_comb_palign; 169 170 static int segvn_concat(struct seg *, struct seg *, int); 171 static int segvn_extend_prev(struct seg *, struct seg *, 172 struct segvn_crargs *, size_t); 173 static int segvn_extend_next(struct seg *, struct seg *, 174 struct segvn_crargs *, size_t); 175 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 176 static void segvn_pagelist_rele(page_t **); 177 static void segvn_setvnode_mpss(vnode_t *); 178 static void segvn_relocate_pages(page_t **, page_t *); 179 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 180 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 181 uint_t, page_t **, page_t **, uint_t *, int *); 182 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 183 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 184 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 185 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 186 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 187 u_offset_t, struct vpage *, page_t **, uint_t, 188 enum fault_type, enum seg_rw, int); 189 static void segvn_vpage(struct seg *); 190 static size_t segvn_count_swap_by_vpages(struct seg *); 191 192 static void segvn_purge(struct seg *seg); 193 static int segvn_reclaim(void *, caddr_t, size_t, struct page **, 194 enum seg_rw, int); 195 static int shamp_reclaim(void *, caddr_t, size_t, struct page **, 196 enum seg_rw, int); 197 198 static int sameprot(struct seg *, caddr_t, size_t); 199 200 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 201 static int segvn_clrszc(struct seg *); 202 static struct seg *segvn_split_seg(struct seg *, caddr_t); 203 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 204 ulong_t, uint_t); 205 206 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 207 size_t, void *, u_offset_t); 208 209 static struct kmem_cache *segvn_cache; 210 static struct kmem_cache **segvn_szc_cache; 211 212 #ifdef VM_STATS 213 static struct segvnvmstats_str { 214 ulong_t fill_vp_pages[31]; 215 ulong_t fltvnpages[49]; 216 ulong_t fullszcpages[10]; 217 ulong_t relocatepages[3]; 218 ulong_t fltanpages[17]; 219 ulong_t pagelock[2]; 220 ulong_t demoterange[3]; 221 } segvnvmstats; 222 #endif /* VM_STATS */ 223 224 #define SDR_RANGE 1 /* demote entire range */ 225 #define SDR_END 2 /* demote non aligned ends only */ 226 227 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 228 if 
((len) != 0) { \ 229 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 230 ASSERT(lpgaddr >= (seg)->s_base); \ 231 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 232 (len)), pgsz); \ 233 ASSERT(lpgeaddr > lpgaddr); \ 234 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 235 } else { \ 236 lpgeaddr = lpgaddr = (addr); \ 237 } \ 238 } 239 240 /*ARGSUSED*/ 241 static int 242 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 243 { 244 struct segvn_data *svd = buf; 245 246 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 247 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 248 svd->svn_trnext = svd->svn_trprev = NULL; 249 return (0); 250 } 251 252 /*ARGSUSED1*/ 253 static void 254 segvn_cache_destructor(void *buf, void *cdrarg) 255 { 256 struct segvn_data *svd = buf; 257 258 rw_destroy(&svd->lock); 259 mutex_destroy(&svd->segfree_syncmtx); 260 } 261 262 /*ARGSUSED*/ 263 static int 264 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 265 { 266 bzero(buf, sizeof (svntr_t)); 267 return (0); 268 } 269 270 /* 271 * Patching this variable to non-zero allows the system to run with 272 * stacks marked as "not executable". It's a bit of a kludge, but is 273 * provided as a tweakable for platforms that export those ABIs 274 * (e.g. sparc V8) that have executable stacks enabled by default. 275 * There are also some restrictions for platforms that don't actually 276 * implement 'noexec' protections. 277 * 278 * Once enabled, the system is (therefore) unable to provide a fully 279 * ABI-compliant execution environment, though practically speaking, 280 * most everything works. The exceptions are generally some interpreters 281 * and debuggers that create executable code on the stack and jump 282 * into it (without explicitly mprotecting the address range to include 283 * PROT_EXEC). 284 * 285 * One important class of applications that are disabled are those 286 * that have been transformed into malicious agents using one of the 287 * numerous "buffer overflow" attacks. See 4007890. 288 */ 289 int noexec_user_stack = 0; 290 int noexec_user_stack_log = 1; 291 292 int segvn_lpg_disable = 0; 293 uint_t segvn_maxpgszc = 0; 294 295 ulong_t segvn_vmpss_clrszc_cnt; 296 ulong_t segvn_vmpss_clrszc_err; 297 ulong_t segvn_fltvnpages_clrszc_cnt; 298 ulong_t segvn_fltvnpages_clrszc_err; 299 ulong_t segvn_setpgsz_align_err; 300 ulong_t segvn_setpgsz_anon_align_err; 301 ulong_t segvn_setpgsz_getattr_err; 302 ulong_t segvn_setpgsz_eof_err; 303 ulong_t segvn_faultvnmpss_align_err1; 304 ulong_t segvn_faultvnmpss_align_err2; 305 ulong_t segvn_faultvnmpss_align_err3; 306 ulong_t segvn_faultvnmpss_align_err4; 307 ulong_t segvn_faultvnmpss_align_err5; 308 ulong_t segvn_vmpss_pageio_deadlk_err; 309 310 int segvn_use_regions = 1; 311 312 /* 313 * Segvn supports text replication optimization for NUMA platforms. Text 314 * replica's are represented by anon maps (amp). There's one amp per text file 315 * region per lgroup. A process chooses the amp for each of its text mappings 316 * based on the lgroup assignment of its main thread (t_tid = 1). All 317 * processes that want a replica on a particular lgroup for the same text file 318 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 319 * with vp,off,size,szc used as a key. Text replication segments are read only 320 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 321 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 322 * pages. 
Replication amp is assigned to a segment when it gets its first 323 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread 324 * rechecks periodically if the process still maps an amp local to the main 325 * thread. If not async thread forces process to remap to an amp in the new 326 * home lgroup of the main thread. Current text replication implementation 327 * only provides the benefit to workloads that do most of their work in the 328 * main thread of a process or all the threads of a process run in the same 329 * lgroup. To extend text replication benefit to different types of 330 * multithreaded workloads further work would be needed in the hat layer to 331 * allow the same virtual address in the same hat to simultaneously map 332 * different physical addresses (i.e. page table replication would be needed 333 * for x86). 334 * 335 * amp pages are used instead of vnode pages as long as segment has a very 336 * simple life cycle. It's created via segvn_create(), handles S_EXEC 337 * (S_READ) pagefaults and is fully unmapped. If anything more complicated 338 * happens such as protection is changed, real COW fault happens, pagesize is 339 * changed, MC_LOCK is requested or segment is partially unmapped we turn off 340 * text replication by converting the segment back to vnode only segment 341 * (unmap segment's address range and set svd->amp to NULL). 342 * 343 * The original file can be changed after amp is inserted into 344 * svntr_hashtab. Processes that are launched after the file is already 345 * changed can't use the replica's created prior to the file change. To 346 * implement this functionality hash entries are timestamped. Replica's can 347 * only be used if current file modification time is the same as the timestamp 348 * saved when hash entry was created. However just timestamps alone are not 349 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We 350 * deal with file changes via MAP_SHARED mappings differently. When writable 351 * MAP_SHARED mappings are created to vnodes marked as executable we mark all 352 * existing replica's for this vnode as not usable for future text 353 * mappings. And we don't create new replica's for files that currently have 354 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is 355 * true). 
356 */ 357 358 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 359 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 360 361 static ulong_t svntr_hashtab_sz = 512; 362 static svntr_bucket_t *svntr_hashtab = NULL; 363 static struct kmem_cache *svntr_cache; 364 static svntr_stats_t *segvn_textrepl_stats; 365 static ksema_t segvn_trasync_sem; 366 367 int segvn_disable_textrepl = 1; 368 size_t textrepl_size_thresh = (size_t)-1; 369 size_t segvn_textrepl_bytes = 0; 370 size_t segvn_textrepl_max_bytes = 0; 371 clock_t segvn_update_textrepl_interval = 0; 372 int segvn_update_tr_time = 10; 373 int segvn_disable_textrepl_update = 0; 374 375 static void segvn_textrepl(struct seg *); 376 static void segvn_textunrepl(struct seg *, int); 377 static void segvn_inval_trcache(vnode_t *); 378 static void segvn_trasync_thread(void); 379 static void segvn_trupdate_wakeup(void *); 380 static void segvn_trupdate(void); 381 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 382 ulong_t); 383 384 /* 385 * Initialize segvn data structures 386 */ 387 void 388 segvn_init(void) 389 { 390 uint_t maxszc; 391 uint_t szc; 392 size_t pgsz; 393 394 segvn_cache = kmem_cache_create("segvn_cache", 395 sizeof (struct segvn_data), 0, 396 segvn_cache_constructor, segvn_cache_destructor, NULL, 397 NULL, NULL, 0); 398 399 if (segvn_lpg_disable == 0) { 400 szc = maxszc = page_num_pagesizes() - 1; 401 if (szc == 0) { 402 segvn_lpg_disable = 1; 403 } 404 if (page_get_pagesize(0) != PAGESIZE) { 405 panic("segvn_init: bad szc 0"); 406 /*NOTREACHED*/ 407 } 408 while (szc != 0) { 409 pgsz = page_get_pagesize(szc); 410 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 411 panic("segvn_init: bad szc %d", szc); 412 /*NOTREACHED*/ 413 } 414 szc--; 415 } 416 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 417 segvn_maxpgszc = maxszc; 418 } 419 420 if (segvn_maxpgszc) { 421 segvn_szc_cache = (struct kmem_cache **)kmem_alloc( 422 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *), 423 KM_SLEEP); 424 } 425 426 for (szc = 1; szc <= segvn_maxpgszc; szc++) { 427 char str[32]; 428 429 (void) sprintf(str, "segvn_szc_cache%d", szc); 430 segvn_szc_cache[szc] = kmem_cache_create(str, 431 page_get_pagecnt(szc) * sizeof (page_t *), 0, 432 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 433 } 434 435 436 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 437 segvn_use_regions = 0; 438 439 /* 440 * For now shared regions and text replication segvn support 441 * are mutually exclusive. This is acceptable because 442 * currently significant benefit from text replication was 443 * only observed on AMD64 NUMA platforms (due to relatively 444 * small L2$ size) and currently we don't support shared 445 * regions on x86. 
446 */ 447 if (segvn_use_regions && !segvn_disable_textrepl) { 448 segvn_disable_textrepl = 1; 449 } 450 451 #if defined(_LP64) 452 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 453 !segvn_disable_textrepl) { 454 ulong_t i; 455 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 456 457 svntr_cache = kmem_cache_create("svntr_cache", 458 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 459 NULL, NULL, NULL, 0); 460 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 461 for (i = 0; i < svntr_hashtab_sz; i++) { 462 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 463 MUTEX_DEFAULT, NULL); 464 } 465 segvn_textrepl_max_bytes = ptob(physmem) / 466 segvn_textrepl_max_bytes_factor; 467 segvn_textrepl_stats = kmem_zalloc(NCPU * 468 sizeof (svntr_stats_t), KM_SLEEP); 469 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 470 (void) thread_create(NULL, 0, segvn_trasync_thread, 471 NULL, 0, &p0, TS_RUN, minclsyspri); 472 } 473 #endif 474 475 if (!ISP2(segvn_pglock_comb_balign) || 476 segvn_pglock_comb_balign < PAGESIZE) { 477 segvn_pglock_comb_balign = 1UL << 16; /* 64K */ 478 } 479 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1; 480 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign); 481 } 482 483 #define SEGVN_PAGEIO ((void *)0x1) 484 #define SEGVN_NOPAGEIO ((void *)0x2) 485 486 static void 487 segvn_setvnode_mpss(vnode_t *vp) 488 { 489 int err; 490 491 ASSERT(vp->v_mpssdata == NULL || 492 vp->v_mpssdata == SEGVN_PAGEIO || 493 vp->v_mpssdata == SEGVN_NOPAGEIO); 494 495 if (vp->v_mpssdata == NULL) { 496 if (vn_vmpss_usepageio(vp)) { 497 err = VOP_PAGEIO(vp, (page_t *)NULL, 498 (u_offset_t)0, 0, 0, CRED(), NULL); 499 } else { 500 err = ENOSYS; 501 } 502 /* 503 * set v_mpssdata just once per vnode life 504 * so that it never changes. 505 */ 506 mutex_enter(&vp->v_lock); 507 if (vp->v_mpssdata == NULL) { 508 if (err == EINVAL) { 509 vp->v_mpssdata = SEGVN_PAGEIO; 510 } else { 511 vp->v_mpssdata = SEGVN_NOPAGEIO; 512 } 513 } 514 mutex_exit(&vp->v_lock); 515 } 516 } 517 518 int 519 segvn_create(struct seg *seg, void *argsp) 520 { 521 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 522 struct segvn_data *svd; 523 size_t swresv = 0; 524 struct cred *cred; 525 struct anon_map *amp; 526 int error = 0; 527 size_t pgsz; 528 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 529 int use_rgn = 0; 530 int trok = 0; 531 532 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 533 534 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 535 panic("segvn_create type"); 536 /*NOTREACHED*/ 537 } 538 539 /* 540 * Check arguments. If a shared anon structure is given then 541 * it is illegal to also specify a vp. 542 */ 543 if (a->amp != NULL && a->vp != NULL) { 544 panic("segvn_create anon_map"); 545 /*NOTREACHED*/ 546 } 547 548 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 549 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 550 segvn_use_regions) { 551 use_rgn = 1; 552 } 553 554 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. 
*/ 555 if (a->type == MAP_SHARED) 556 a->flags &= ~MAP_NORESERVE; 557 558 if (a->szc != 0) { 559 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 560 (a->amp != NULL && a->type == MAP_PRIVATE) || 561 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 562 a->szc = 0; 563 } else { 564 if (a->szc > segvn_maxpgszc) 565 a->szc = segvn_maxpgszc; 566 pgsz = page_get_pagesize(a->szc); 567 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 568 !IS_P2ALIGNED(seg->s_size, pgsz)) { 569 a->szc = 0; 570 } else if (a->vp != NULL) { 571 extern struct vnode kvp; 572 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 573 /* 574 * paranoid check. 575 * hat_page_demote() is not supported 576 * on swapfs pages. 577 */ 578 a->szc = 0; 579 } else if (map_addr_vacalign_check(seg->s_base, 580 a->offset & PAGEMASK)) { 581 a->szc = 0; 582 } 583 } else if (a->amp != NULL) { 584 pgcnt_t anum = btopr(a->offset); 585 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 586 if (!IS_P2ALIGNED(anum, pgcnt)) { 587 a->szc = 0; 588 } 589 } 590 } 591 } 592 593 /* 594 * If segment may need private pages, reserve them now. 595 */ 596 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 597 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 598 if (anon_resv(seg->s_size) == 0) 599 return (EAGAIN); 600 swresv = seg->s_size; 601 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 602 seg, swresv, 1); 603 } 604 605 /* 606 * Reserve any mapping structures that may be required. 607 * 608 * Don't do it for segments that may use regions. It's currently a 609 * noop in the hat implementations anyway. 610 */ 611 if (!use_rgn) { 612 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 613 } 614 615 if (a->cred) { 616 cred = a->cred; 617 crhold(cred); 618 } else { 619 crhold(cred = CRED()); 620 } 621 622 /* Inform the vnode of the new mapping */ 623 if (a->vp != NULL) { 624 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 625 seg->s_as, seg->s_base, seg->s_size, a->prot, 626 a->maxprot, a->type, cred, NULL); 627 if (error) { 628 if (swresv != 0) { 629 anon_unresv(swresv); 630 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 631 "anon proc:%p %lu %u", seg, swresv, 0); 632 } 633 crfree(cred); 634 if (!use_rgn) { 635 hat_unload(seg->s_as->a_hat, seg->s_base, 636 seg->s_size, HAT_UNLOAD_UNMAP); 637 } 638 return (error); 639 } 640 /* 641 * svntr_hashtab will be NULL if we support shared regions. 642 */ 643 trok = ((a->flags & MAP_TEXT) && 644 (seg->s_size > textrepl_size_thresh || 645 (a->flags & _MAP_TEXTREPL)) && 646 lgrp_optimizations() && svntr_hashtab != NULL && 647 a->type == MAP_PRIVATE && swresv == 0 && 648 !(a->flags & MAP_NORESERVE) && 649 seg->s_as != &kas && a->vp->v_type == VREG); 650 651 ASSERT(!trok || !use_rgn); 652 } 653 654 /* 655 * If more than one segment in the address space, and they're adjacent 656 * virtually, try to concatenate them. Don't concatenate if an 657 * explicit anon_map structure was supplied (e.g., SystemV shared 658 * memory) or if we'll use text replication for this segment. 659 */ 660 if (a->amp == NULL && !use_rgn && !trok) { 661 struct seg *pseg, *nseg; 662 struct segvn_data *psvd, *nsvd; 663 lgrp_mem_policy_t ppolicy, npolicy; 664 uint_t lgrp_mem_policy_flags = 0; 665 extern lgrp_mem_policy_t lgrp_mem_default_policy; 666 667 /* 668 * Memory policy flags (lgrp_mem_policy_flags) is valid when 669 * extending stack/heap segments. 
670 */ 671 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 672 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 673 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 674 } else { 675 /* 676 * Get policy when not extending it from another segment 677 */ 678 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 679 } 680 681 /* 682 * First, try to concatenate the previous and new segments 683 */ 684 pseg = AS_SEGPREV(seg->s_as, seg); 685 if (pseg != NULL && 686 pseg->s_base + pseg->s_size == seg->s_base && 687 pseg->s_ops == &segvn_ops) { 688 /* 689 * Get memory allocation policy from previous segment. 690 * When extension is specified (e.g. for heap) apply 691 * this policy to the new segment regardless of the 692 * outcome of segment concatenation. Extension occurs 693 * for non-default policy otherwise default policy is 694 * used and is based on extended segment size. 695 */ 696 psvd = (struct segvn_data *)pseg->s_data; 697 ppolicy = psvd->policy_info.mem_policy; 698 if (lgrp_mem_policy_flags == 699 LGRP_MP_FLAG_EXTEND_UP) { 700 if (ppolicy != lgrp_mem_default_policy) { 701 mpolicy = ppolicy; 702 } else { 703 mpolicy = lgrp_mem_policy_default( 704 pseg->s_size + seg->s_size, 705 a->type); 706 } 707 } 708 709 if (mpolicy == ppolicy && 710 (pseg->s_size + seg->s_size <= 711 segvn_comb_thrshld || psvd->amp == NULL) && 712 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 713 /* 714 * success! now try to concatenate 715 * with following seg 716 */ 717 crfree(cred); 718 nseg = AS_SEGNEXT(pseg->s_as, pseg); 719 if (nseg != NULL && 720 nseg != pseg && 721 nseg->s_ops == &segvn_ops && 722 pseg->s_base + pseg->s_size == 723 nseg->s_base) 724 (void) segvn_concat(pseg, nseg, 0); 725 ASSERT(pseg->s_szc == 0 || 726 (a->szc == pseg->s_szc && 727 IS_P2ALIGNED(pseg->s_base, pgsz) && 728 IS_P2ALIGNED(pseg->s_size, pgsz))); 729 return (0); 730 } 731 } 732 733 /* 734 * Failed, so try to concatenate with following seg 735 */ 736 nseg = AS_SEGNEXT(seg->s_as, seg); 737 if (nseg != NULL && 738 seg->s_base + seg->s_size == nseg->s_base && 739 nseg->s_ops == &segvn_ops) { 740 /* 741 * Get memory allocation policy from next segment. 742 * When extension is specified (e.g. for stack) apply 743 * this policy to the new segment regardless of the 744 * outcome of segment concatenation. Extension occurs 745 * for non-default policy otherwise default policy is 746 * used and is based on extended segment size. 747 */ 748 nsvd = (struct segvn_data *)nseg->s_data; 749 npolicy = nsvd->policy_info.mem_policy; 750 if (lgrp_mem_policy_flags == 751 LGRP_MP_FLAG_EXTEND_DOWN) { 752 if (npolicy != lgrp_mem_default_policy) { 753 mpolicy = npolicy; 754 } else { 755 mpolicy = lgrp_mem_policy_default( 756 nseg->s_size + seg->s_size, 757 a->type); 758 } 759 } 760 761 if (mpolicy == npolicy && 762 segvn_extend_next(seg, nseg, a, swresv) == 0) { 763 crfree(cred); 764 ASSERT(nseg->s_szc == 0 || 765 (a->szc == nseg->s_szc && 766 IS_P2ALIGNED(nseg->s_base, pgsz) && 767 IS_P2ALIGNED(nseg->s_size, pgsz))); 768 return (0); 769 } 770 } 771 } 772 773 if (a->vp != NULL) { 774 VN_HOLD(a->vp); 775 if (a->type == MAP_SHARED) 776 lgrp_shm_policy_init(NULL, a->vp); 777 } 778 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 779 780 seg->s_ops = &segvn_ops; 781 seg->s_data = (void *)svd; 782 seg->s_szc = a->szc; 783 784 svd->seg = seg; 785 svd->vp = a->vp; 786 /* 787 * Anonymous mappings have no backing file so the offset is meaningless. 788 */ 789 svd->offset = a->vp ? 
(a->offset & PAGEMASK) : 0; 790 svd->prot = a->prot; 791 svd->maxprot = a->maxprot; 792 svd->pageprot = 0; 793 svd->type = a->type; 794 svd->vpage = NULL; 795 svd->cred = cred; 796 svd->advice = MADV_NORMAL; 797 svd->pageadvice = 0; 798 svd->flags = (ushort_t)a->flags; 799 svd->softlockcnt = 0; 800 svd->softlockcnt_sbase = 0; 801 svd->softlockcnt_send = 0; 802 svd->rcookie = HAT_INVALID_REGION_COOKIE; 803 svd->pageswap = 0; 804 805 if (a->szc != 0 && a->vp != NULL) { 806 segvn_setvnode_mpss(a->vp); 807 } 808 if (svd->type == MAP_SHARED && svd->vp != NULL && 809 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 810 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 811 segvn_inval_trcache(svd->vp); 812 } 813 814 amp = a->amp; 815 if ((svd->amp = amp) == NULL) { 816 svd->anon_index = 0; 817 if (svd->type == MAP_SHARED) { 818 svd->swresv = 0; 819 /* 820 * Shared mappings to a vp need no other setup. 821 * If we have a shared mapping to an anon_map object 822 * which hasn't been allocated yet, allocate the 823 * struct now so that it will be properly shared 824 * by remembering the swap reservation there. 825 */ 826 if (a->vp == NULL) { 827 svd->amp = anonmap_alloc(seg->s_size, swresv, 828 ANON_SLEEP); 829 svd->amp->a_szc = seg->s_szc; 830 } 831 } else { 832 /* 833 * Private mapping (with or without a vp). 834 * Allocate anon_map when needed. 835 */ 836 svd->swresv = swresv; 837 } 838 } else { 839 pgcnt_t anon_num; 840 841 /* 842 * Mapping to an existing anon_map structure without a vp. 843 * For now we will insure that the segment size isn't larger 844 * than the size - offset gives us. Later on we may wish to 845 * have the anon array dynamically allocated itself so that 846 * we don't always have to allocate all the anon pointer slots. 847 * This of course involves adding extra code to check that we 848 * aren't trying to use an anon pointer slot beyond the end 849 * of the currently allocated anon array. 850 */ 851 if ((amp->size - a->offset) < seg->s_size) { 852 panic("segvn_create anon_map size"); 853 /*NOTREACHED*/ 854 } 855 856 anon_num = btopr(a->offset); 857 858 if (a->type == MAP_SHARED) { 859 /* 860 * SHARED mapping to a given anon_map. 861 */ 862 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 863 amp->refcnt++; 864 if (a->szc > amp->a_szc) { 865 amp->a_szc = a->szc; 866 } 867 ANON_LOCK_EXIT(&->a_rwlock); 868 svd->anon_index = anon_num; 869 svd->swresv = 0; 870 } else { 871 /* 872 * PRIVATE mapping to a given anon_map. 873 * Make sure that all the needed anon 874 * structures are created (so that we will 875 * share the underlying pages if nothing 876 * is written by this mapping) and then 877 * duplicate the anon array as is done 878 * when a privately mapped segment is dup'ed. 879 */ 880 struct anon *ap; 881 caddr_t addr; 882 caddr_t eaddr; 883 ulong_t anon_idx; 884 int hat_flag = HAT_LOAD; 885 886 if (svd->flags & MAP_TEXT) { 887 hat_flag |= HAT_LOAD_TEXT; 888 } 889 890 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 891 svd->amp->a_szc = seg->s_szc; 892 svd->anon_index = 0; 893 svd->swresv = swresv; 894 895 /* 896 * Prevent 2 threads from allocating anon 897 * slots simultaneously. 898 */ 899 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 900 eaddr = seg->s_base + seg->s_size; 901 902 for (anon_idx = anon_num, addr = seg->s_base; 903 addr < eaddr; addr += PAGESIZE, anon_idx++) { 904 page_t *pp; 905 906 if ((ap = anon_get_ptr(amp->ahp, 907 anon_idx)) != NULL) 908 continue; 909 910 /* 911 * Allocate the anon struct now. 
912 * Might as well load up translation 913 * to the page while we're at it... 914 */ 915 pp = anon_zero(seg, addr, &ap, cred); 916 if (ap == NULL || pp == NULL) { 917 panic("segvn_create anon_zero"); 918 /*NOTREACHED*/ 919 } 920 921 /* 922 * Re-acquire the anon_map lock and 923 * initialize the anon array entry. 924 */ 925 ASSERT(anon_get_ptr(amp->ahp, 926 anon_idx) == NULL); 927 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 928 ANON_SLEEP); 929 930 ASSERT(seg->s_szc == 0); 931 ASSERT(!IS_VMODSORT(pp->p_vnode)); 932 933 ASSERT(use_rgn == 0); 934 hat_memload(seg->s_as->a_hat, addr, pp, 935 svd->prot & ~PROT_WRITE, hat_flag); 936 937 page_unlock(pp); 938 } 939 ASSERT(seg->s_szc == 0); 940 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 941 0, seg->s_size); 942 ANON_LOCK_EXIT(&->a_rwlock); 943 } 944 } 945 946 /* 947 * Set default memory allocation policy for segment 948 * 949 * Always set policy for private memory at least for initialization 950 * even if this is a shared memory segment 951 */ 952 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 953 954 if (svd->type == MAP_SHARED) 955 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 956 svd->vp, svd->offset, seg->s_size); 957 958 if (use_rgn) { 959 ASSERT(!trok); 960 ASSERT(svd->amp == NULL); 961 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 962 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 963 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 964 HAT_REGION_TEXT); 965 } 966 967 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 968 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 969 970 return (0); 971 } 972 973 /* 974 * Concatenate two existing segments, if possible. 975 * Return 0 on success, -1 if two segments are not compatible 976 * or -2 on memory allocation failure. 977 * If amp_cat == 1 then try and concat segments with anon maps 978 */ 979 static int 980 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 981 { 982 struct segvn_data *svd1 = seg1->s_data; 983 struct segvn_data *svd2 = seg2->s_data; 984 struct anon_map *amp1 = svd1->amp; 985 struct anon_map *amp2 = svd2->amp; 986 struct vpage *vpage1 = svd1->vpage; 987 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 988 size_t size, nvpsize; 989 pgcnt_t npages1, npages2; 990 991 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 992 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 993 ASSERT(seg1->s_ops == seg2->s_ops); 994 995 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 996 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 997 return (-1); 998 } 999 1000 /* both segments exist, try to merge them */ 1001 #define incompat(x) (svd1->x != svd2->x) 1002 if (incompat(vp) || incompat(maxprot) || 1003 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 1004 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 1005 incompat(type) || incompat(cred) || incompat(flags) || 1006 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 1007 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0) 1008 return (-1); 1009 #undef incompat 1010 1011 /* 1012 * vp == NULL implies zfod, offset doesn't matter 1013 */ 1014 if (svd1->vp != NULL && 1015 svd1->offset + seg1->s_size != svd2->offset) { 1016 return (-1); 1017 } 1018 1019 /* 1020 * Don't concatenate if either segment uses text replication. 
1021 */ 1022 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 1023 return (-1); 1024 } 1025 1026 /* 1027 * Fail early if we're not supposed to concatenate 1028 * segments with non NULL amp. 1029 */ 1030 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) { 1031 return (-1); 1032 } 1033 1034 if (svd1->vp == NULL && svd1->type == MAP_SHARED) { 1035 if (amp1 != amp2) { 1036 return (-1); 1037 } 1038 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) != 1039 svd2->anon_index) { 1040 return (-1); 1041 } 1042 ASSERT(amp1 == NULL || amp1->refcnt >= 2); 1043 } 1044 1045 /* 1046 * If either seg has vpages, create a new merged vpage array. 1047 */ 1048 if (vpage1 != NULL || vpage2 != NULL) { 1049 struct vpage *vp, *evp; 1050 1051 npages1 = seg_pages(seg1); 1052 npages2 = seg_pages(seg2); 1053 nvpsize = vpgtob(npages1 + npages2); 1054 1055 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) { 1056 return (-2); 1057 } 1058 1059 if (vpage1 != NULL) { 1060 bcopy(vpage1, nvpage, vpgtob(npages1)); 1061 } else { 1062 evp = nvpage + npages1; 1063 for (vp = nvpage; vp < evp; vp++) { 1064 VPP_SETPROT(vp, svd1->prot); 1065 VPP_SETADVICE(vp, svd1->advice); 1066 } 1067 } 1068 1069 if (vpage2 != NULL) { 1070 bcopy(vpage2, nvpage + npages1, vpgtob(npages2)); 1071 } else { 1072 evp = nvpage + npages1 + npages2; 1073 for (vp = nvpage + npages1; vp < evp; vp++) { 1074 VPP_SETPROT(vp, svd2->prot); 1075 VPP_SETADVICE(vp, svd2->advice); 1076 } 1077 } 1078 1079 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) { 1080 ASSERT(svd1->swresv == seg1->s_size); 1081 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1082 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1083 evp = nvpage + npages1; 1084 for (vp = nvpage; vp < evp; vp++) { 1085 VPP_SETSWAPRES(vp); 1086 } 1087 } 1088 1089 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) { 1090 ASSERT(svd2->swresv == seg2->s_size); 1091 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1092 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1093 vp = nvpage + npages1; 1094 evp = vp + npages2; 1095 for (; vp < evp; vp++) { 1096 VPP_SETSWAPRES(vp); 1097 } 1098 } 1099 } 1100 ASSERT((vpage1 != NULL || vpage2 != NULL) || 1101 (svd1->pageswap == 0 && svd2->pageswap == 0)); 1102 1103 /* 1104 * If either segment has private pages, create a new merged anon 1105 * array. If mergeing shared anon segments just decrement anon map's 1106 * refcnt. 1107 */ 1108 if (amp1 != NULL && svd1->type == MAP_SHARED) { 1109 ASSERT(amp1 == amp2 && svd1->vp == NULL); 1110 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1111 ASSERT(amp1->refcnt >= 2); 1112 amp1->refcnt--; 1113 ANON_LOCK_EXIT(&1->a_rwlock); 1114 svd2->amp = NULL; 1115 } else if (amp1 != NULL || amp2 != NULL) { 1116 struct anon_hdr *nahp; 1117 struct anon_map *namp = NULL; 1118 size_t asize; 1119 1120 ASSERT(svd1->type == MAP_PRIVATE); 1121 1122 asize = seg1->s_size + seg2->s_size; 1123 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) { 1124 if (nvpage != NULL) { 1125 kmem_free(nvpage, nvpsize); 1126 } 1127 return (-2); 1128 } 1129 if (amp1 != NULL) { 1130 /* 1131 * XXX anon rwlock is not really needed because 1132 * this is a private segment and we are writers. 
1133 */ 1134 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1135 ASSERT(amp1->refcnt == 1); 1136 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1137 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1138 anon_release(nahp, btop(asize)); 1139 ANON_LOCK_EXIT(&1->a_rwlock); 1140 if (nvpage != NULL) { 1141 kmem_free(nvpage, nvpsize); 1142 } 1143 return (-2); 1144 } 1145 } 1146 if (amp2 != NULL) { 1147 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1148 ASSERT(amp2->refcnt == 1); 1149 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1150 nahp, btop(seg1->s_size), btop(seg2->s_size), 1151 ANON_NOSLEEP)) { 1152 anon_release(nahp, btop(asize)); 1153 ANON_LOCK_EXIT(&2->a_rwlock); 1154 if (amp1 != NULL) { 1155 ANON_LOCK_EXIT(&1->a_rwlock); 1156 } 1157 if (nvpage != NULL) { 1158 kmem_free(nvpage, nvpsize); 1159 } 1160 return (-2); 1161 } 1162 } 1163 if (amp1 != NULL) { 1164 namp = amp1; 1165 anon_release(amp1->ahp, btop(amp1->size)); 1166 } 1167 if (amp2 != NULL) { 1168 if (namp == NULL) { 1169 ASSERT(amp1 == NULL); 1170 namp = amp2; 1171 anon_release(amp2->ahp, btop(amp2->size)); 1172 } else { 1173 amp2->refcnt--; 1174 ANON_LOCK_EXIT(&2->a_rwlock); 1175 anonmap_free(amp2); 1176 } 1177 svd2->amp = NULL; /* needed for seg_free */ 1178 } 1179 namp->ahp = nahp; 1180 namp->size = asize; 1181 svd1->amp = namp; 1182 svd1->anon_index = 0; 1183 ANON_LOCK_EXIT(&namp->a_rwlock); 1184 } 1185 /* 1186 * Now free the old vpage structures. 1187 */ 1188 if (nvpage != NULL) { 1189 if (vpage1 != NULL) { 1190 kmem_free(vpage1, vpgtob(npages1)); 1191 } 1192 if (vpage2 != NULL) { 1193 svd2->vpage = NULL; 1194 kmem_free(vpage2, vpgtob(npages2)); 1195 } 1196 if (svd2->pageprot) { 1197 svd1->pageprot = 1; 1198 } 1199 if (svd2->pageadvice) { 1200 svd1->pageadvice = 1; 1201 } 1202 if (svd2->pageswap) { 1203 svd1->pageswap = 1; 1204 } 1205 svd1->vpage = nvpage; 1206 } 1207 1208 /* all looks ok, merge segments */ 1209 svd1->swresv += svd2->swresv; 1210 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1211 size = seg2->s_size; 1212 seg_free(seg2); 1213 seg1->s_size += size; 1214 return (0); 1215 } 1216 1217 /* 1218 * Extend the previous segment (seg1) to include the 1219 * new segment (seg2 + a), if possible. 1220 * Return 0 on success. 1221 */ 1222 static int 1223 segvn_extend_prev(seg1, seg2, a, swresv) 1224 struct seg *seg1, *seg2; 1225 struct segvn_crargs *a; 1226 size_t swresv; 1227 { 1228 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1229 size_t size; 1230 struct anon_map *amp1; 1231 struct vpage *new_vpage; 1232 1233 /* 1234 * We don't need any segment level locks for "segvn" data 1235 * since the address space is "write" locked. 
1236 */ 1237 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1238 1239 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1240 return (-1); 1241 } 1242 1243 /* second segment is new, try to extend first */ 1244 /* XXX - should also check cred */ 1245 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1246 (!svd1->pageprot && (svd1->prot != a->prot)) || 1247 svd1->type != a->type || svd1->flags != a->flags || 1248 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0) 1249 return (-1); 1250 1251 /* vp == NULL implies zfod, offset doesn't matter */ 1252 if (svd1->vp != NULL && 1253 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1254 return (-1); 1255 1256 if (svd1->tr_state != SEGVN_TR_OFF) { 1257 return (-1); 1258 } 1259 1260 amp1 = svd1->amp; 1261 if (amp1) { 1262 pgcnt_t newpgs; 1263 1264 /* 1265 * Segment has private pages, can data structures 1266 * be expanded? 1267 * 1268 * Acquire the anon_map lock to prevent it from changing, 1269 * if it is shared. This ensures that the anon_map 1270 * will not change while a thread which has a read/write 1271 * lock on an address space references it. 1272 * XXX - Don't need the anon_map lock at all if "refcnt" 1273 * is 1. 1274 * 1275 * Can't grow a MAP_SHARED segment with an anonmap because 1276 * there may be existing anon slots where we want to extend 1277 * the segment and we wouldn't know what to do with them 1278 * (e.g., for tmpfs right thing is to just leave them there, 1279 * for /dev/zero they should be cleared out). 1280 */ 1281 if (svd1->type == MAP_SHARED) 1282 return (-1); 1283 1284 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1285 if (amp1->refcnt > 1) { 1286 ANON_LOCK_EXIT(&1->a_rwlock); 1287 return (-1); 1288 } 1289 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1290 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1291 1292 if (newpgs == 0) { 1293 ANON_LOCK_EXIT(&1->a_rwlock); 1294 return (-1); 1295 } 1296 amp1->size = ptob(newpgs); 1297 ANON_LOCK_EXIT(&1->a_rwlock); 1298 } 1299 if (svd1->vpage != NULL) { 1300 struct vpage *vp, *evp; 1301 new_vpage = 1302 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1303 KM_NOSLEEP); 1304 if (new_vpage == NULL) 1305 return (-1); 1306 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1307 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1308 svd1->vpage = new_vpage; 1309 1310 vp = new_vpage + seg_pages(seg1); 1311 evp = vp + seg_pages(seg2); 1312 for (; vp < evp; vp++) 1313 VPP_SETPROT(vp, a->prot); 1314 if (svd1->pageswap && swresv) { 1315 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1316 ASSERT(swresv == seg2->s_size); 1317 vp = new_vpage + seg_pages(seg1); 1318 for (; vp < evp; vp++) { 1319 VPP_SETSWAPRES(vp); 1320 } 1321 } 1322 } 1323 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0); 1324 size = seg2->s_size; 1325 seg_free(seg2); 1326 seg1->s_size += size; 1327 svd1->swresv += swresv; 1328 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1329 svd1->type == MAP_SHARED && svd1->vp != NULL && 1330 (svd1->vp->v_flag & VVMEXEC)) { 1331 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1332 segvn_inval_trcache(svd1->vp); 1333 } 1334 return (0); 1335 } 1336 1337 /* 1338 * Extend the next segment (seg2) to include the 1339 * new segment (seg1 + a), if possible. 1340 * Return 0 on success. 
1341 */ 1342 static int 1343 segvn_extend_next( 1344 struct seg *seg1, 1345 struct seg *seg2, 1346 struct segvn_crargs *a, 1347 size_t swresv) 1348 { 1349 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1350 size_t size; 1351 struct anon_map *amp2; 1352 struct vpage *new_vpage; 1353 1354 /* 1355 * We don't need any segment level locks for "segvn" data 1356 * since the address space is "write" locked. 1357 */ 1358 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1359 1360 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1361 return (-1); 1362 } 1363 1364 /* first segment is new, try to extend second */ 1365 /* XXX - should also check cred */ 1366 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1367 (!svd2->pageprot && (svd2->prot != a->prot)) || 1368 svd2->type != a->type || svd2->flags != a->flags || 1369 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0) 1370 return (-1); 1371 /* vp == NULL implies zfod, offset doesn't matter */ 1372 if (svd2->vp != NULL && 1373 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1374 return (-1); 1375 1376 if (svd2->tr_state != SEGVN_TR_OFF) { 1377 return (-1); 1378 } 1379 1380 amp2 = svd2->amp; 1381 if (amp2) { 1382 pgcnt_t newpgs; 1383 1384 /* 1385 * Segment has private pages, can data structures 1386 * be expanded? 1387 * 1388 * Acquire the anon_map lock to prevent it from changing, 1389 * if it is shared. This ensures that the anon_map 1390 * will not change while a thread which has a read/write 1391 * lock on an address space references it. 1392 * 1393 * XXX - Don't need the anon_map lock at all if "refcnt" 1394 * is 1. 1395 */ 1396 if (svd2->type == MAP_SHARED) 1397 return (-1); 1398 1399 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1400 if (amp2->refcnt > 1) { 1401 ANON_LOCK_EXIT(&2->a_rwlock); 1402 return (-1); 1403 } 1404 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1405 btop(seg2->s_size), btop(seg1->s_size), 1406 ANON_NOSLEEP | ANON_GROWDOWN); 1407 1408 if (newpgs == 0) { 1409 ANON_LOCK_EXIT(&2->a_rwlock); 1410 return (-1); 1411 } 1412 amp2->size = ptob(newpgs); 1413 ANON_LOCK_EXIT(&2->a_rwlock); 1414 } 1415 if (svd2->vpage != NULL) { 1416 struct vpage *vp, *evp; 1417 new_vpage = 1418 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1419 KM_NOSLEEP); 1420 if (new_vpage == NULL) { 1421 /* Not merging segments so adjust anon_index back */ 1422 if (amp2) 1423 svd2->anon_index += seg_pages(seg1); 1424 return (-1); 1425 } 1426 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1427 vpgtob(seg_pages(seg2))); 1428 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1429 svd2->vpage = new_vpage; 1430 1431 vp = new_vpage; 1432 evp = vp + seg_pages(seg1); 1433 for (; vp < evp; vp++) 1434 VPP_SETPROT(vp, a->prot); 1435 if (svd2->pageswap && swresv) { 1436 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1437 ASSERT(swresv == seg1->s_size); 1438 vp = new_vpage; 1439 for (; vp < evp; vp++) { 1440 VPP_SETSWAPRES(vp); 1441 } 1442 } 1443 } 1444 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0); 1445 size = seg1->s_size; 1446 seg_free(seg1); 1447 seg2->s_size += size; 1448 seg2->s_base -= size; 1449 svd2->offset -= size; 1450 svd2->swresv += swresv; 1451 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1452 svd2->type == MAP_SHARED && svd2->vp != NULL && 1453 (svd2->vp->v_flag & VVMEXEC)) { 1454 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1455 segvn_inval_trcache(svd2->vp); 1456 } 1457 return (0); 1458 } 1459 1460 static int 1461 segvn_dup(struct seg *seg, struct seg *newseg) 1462 { 1463 struct segvn_data *svd = (struct 
segvn_data *)seg->s_data; 1464 struct segvn_data *newsvd; 1465 pgcnt_t npages = seg_pages(seg); 1466 int error = 0; 1467 uint_t prot; 1468 size_t len; 1469 struct anon_map *amp; 1470 1471 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1472 1473 /* 1474 * If segment has anon reserved, reserve more for the new seg. 1475 * For a MAP_NORESERVE segment swresv will be a count of all the 1476 * allocated anon slots; thus we reserve for the child as many slots 1477 * as the parent has allocated. This semantic prevents the child or 1478 * parent from dieing during a copy-on-write fault caused by trying 1479 * to write a shared pre-existing anon page. 1480 */ 1481 if ((len = svd->swresv) != 0) { 1482 if (anon_resv(svd->swresv) == 0) 1483 return (ENOMEM); 1484 1485 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1486 seg, len, 0); 1487 } 1488 1489 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1490 1491 newseg->s_ops = &segvn_ops; 1492 newseg->s_data = (void *)newsvd; 1493 newseg->s_szc = seg->s_szc; 1494 1495 newsvd->seg = newseg; 1496 if ((newsvd->vp = svd->vp) != NULL) { 1497 VN_HOLD(svd->vp); 1498 if (svd->type == MAP_SHARED) 1499 lgrp_shm_policy_init(NULL, svd->vp); 1500 } 1501 newsvd->offset = svd->offset; 1502 newsvd->prot = svd->prot; 1503 newsvd->maxprot = svd->maxprot; 1504 newsvd->pageprot = svd->pageprot; 1505 newsvd->type = svd->type; 1506 newsvd->cred = svd->cred; 1507 crhold(newsvd->cred); 1508 newsvd->advice = svd->advice; 1509 newsvd->pageadvice = svd->pageadvice; 1510 newsvd->swresv = svd->swresv; 1511 newsvd->pageswap = svd->pageswap; 1512 newsvd->flags = svd->flags; 1513 newsvd->softlockcnt = 0; 1514 newsvd->softlockcnt_sbase = 0; 1515 newsvd->softlockcnt_send = 0; 1516 newsvd->policy_info = svd->policy_info; 1517 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1518 1519 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1520 /* 1521 * Not attaching to a shared anon object. 1522 */ 1523 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1524 svd->tr_state == SEGVN_TR_OFF); 1525 if (svd->tr_state == SEGVN_TR_ON) { 1526 ASSERT(newsvd->vp != NULL && amp != NULL); 1527 newsvd->tr_state = SEGVN_TR_INIT; 1528 } else { 1529 newsvd->tr_state = svd->tr_state; 1530 } 1531 newsvd->amp = NULL; 1532 newsvd->anon_index = 0; 1533 } else { 1534 /* regions for now are only used on pure vnode segments */ 1535 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1536 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1537 newsvd->tr_state = SEGVN_TR_OFF; 1538 if (svd->type == MAP_SHARED) { 1539 newsvd->amp = amp; 1540 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1541 amp->refcnt++; 1542 ANON_LOCK_EXIT(&->a_rwlock); 1543 newsvd->anon_index = svd->anon_index; 1544 } else { 1545 int reclaim = 1; 1546 1547 /* 1548 * Allocate and initialize new anon_map structure. 1549 */ 1550 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1551 ANON_SLEEP); 1552 newsvd->amp->a_szc = newseg->s_szc; 1553 newsvd->anon_index = 0; 1554 1555 /* 1556 * We don't have to acquire the anon_map lock 1557 * for the new segment (since it belongs to an 1558 * address space that is still not associated 1559 * with any process), or the segment in the old 1560 * address space (since all threads in it 1561 * are stopped while duplicating the address space). 1562 */ 1563 1564 /* 1565 * The goal of the following code is to make sure that 1566 * softlocked pages do not end up as copy on write 1567 * pages. 
This would cause problems where one 1568 * thread writes to a page that is COW and a different 1569 * thread in the same process has softlocked it. The 1570 * softlock lock would move away from this process 1571 * because the write would cause this process to get 1572 * a copy (without the softlock). 1573 * 1574 * The strategy here is to just break the 1575 * sharing on pages that could possibly be 1576 * softlocked. 1577 */ 1578 retry: 1579 if (svd->softlockcnt) { 1580 struct anon *ap, *newap; 1581 size_t i; 1582 uint_t vpprot; 1583 page_t *anon_pl[1+1], *pp; 1584 caddr_t addr; 1585 ulong_t old_idx = svd->anon_index; 1586 ulong_t new_idx = 0; 1587 1588 /* 1589 * The softlock count might be non zero 1590 * because some pages are still stuck in the 1591 * cache for lazy reclaim. Flush the cache 1592 * now. This should drop the count to zero. 1593 * [or there is really I/O going on to these 1594 * pages]. Note, we have the writers lock so 1595 * nothing gets inserted during the flush. 1596 */ 1597 if (reclaim == 1) { 1598 segvn_purge(seg); 1599 reclaim = 0; 1600 goto retry; 1601 } 1602 i = btopr(seg->s_size); 1603 addr = seg->s_base; 1604 /* 1605 * XXX break cow sharing using PAGESIZE 1606 * pages. They will be relocated into larger 1607 * pages at fault time. 1608 */ 1609 while (i-- > 0) { 1610 if (ap = anon_get_ptr(amp->ahp, 1611 old_idx)) { 1612 error = anon_getpage(&ap, 1613 &vpprot, anon_pl, PAGESIZE, 1614 seg, addr, S_READ, 1615 svd->cred); 1616 if (error) { 1617 newsvd->vpage = NULL; 1618 goto out; 1619 } 1620 /* 1621 * prot need not be computed 1622 * below 'cause anon_private is 1623 * going to ignore it anyway 1624 * as child doesn't inherit 1625 * pagelock from parent. 1626 */ 1627 prot = svd->pageprot ? 1628 VPP_PROT( 1629 &svd->vpage[ 1630 seg_page(seg, addr)]) 1631 : svd->prot; 1632 pp = anon_private(&newap, 1633 newseg, addr, prot, 1634 anon_pl[0], 0, 1635 newsvd->cred); 1636 if (pp == NULL) { 1637 /* no mem abort */ 1638 newsvd->vpage = NULL; 1639 error = ENOMEM; 1640 goto out; 1641 } 1642 (void) anon_set_ptr( 1643 newsvd->amp->ahp, new_idx, 1644 newap, ANON_SLEEP); 1645 page_unlock(pp); 1646 } 1647 addr += PAGESIZE; 1648 old_idx++; 1649 new_idx++; 1650 } 1651 } else { /* common case */ 1652 if (seg->s_szc != 0) { 1653 /* 1654 * If at least one of anon slots of a 1655 * large page exists then make sure 1656 * all anon slots of a large page 1657 * exist to avoid partial cow sharing 1658 * of a large page in the future. 1659 */ 1660 anon_dup_fill_holes(amp->ahp, 1661 svd->anon_index, newsvd->amp->ahp, 1662 0, seg->s_size, seg->s_szc, 1663 svd->vp != NULL); 1664 } else { 1665 anon_dup(amp->ahp, svd->anon_index, 1666 newsvd->amp->ahp, 0, seg->s_size); 1667 } 1668 1669 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1670 seg->s_size, PROT_WRITE); 1671 } 1672 } 1673 } 1674 /* 1675 * If necessary, create a vpage structure for the new segment. 1676 * Do not copy any page lock indications. 
1677 */ 1678 if (svd->vpage != NULL) { 1679 uint_t i; 1680 struct vpage *ovp = svd->vpage; 1681 struct vpage *nvp; 1682 1683 nvp = newsvd->vpage = 1684 kmem_alloc(vpgtob(npages), KM_SLEEP); 1685 for (i = 0; i < npages; i++) { 1686 *nvp = *ovp++; 1687 VPP_CLRPPLOCK(nvp++); 1688 } 1689 } else 1690 newsvd->vpage = NULL; 1691 1692 /* Inform the vnode of the new mapping */ 1693 if (newsvd->vp != NULL) { 1694 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1695 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1696 newsvd->maxprot, newsvd->type, newsvd->cred, NULL); 1697 } 1698 out: 1699 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1700 ASSERT(newsvd->amp == NULL); 1701 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1702 newsvd->rcookie = svd->rcookie; 1703 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1704 } 1705 return (error); 1706 } 1707 1708 1709 /* 1710 * callback function to invoke free_vp_pages() for only those pages actually 1711 * processed by the HAT when a shared region is destroyed. 1712 */ 1713 extern int free_pages; 1714 1715 static void 1716 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1717 size_t r_size, void *r_obj, u_offset_t r_objoff) 1718 { 1719 u_offset_t off; 1720 size_t len; 1721 vnode_t *vp = (vnode_t *)r_obj; 1722 1723 ASSERT(eaddr > saddr); 1724 ASSERT(saddr >= r_saddr); 1725 ASSERT(saddr < r_saddr + r_size); 1726 ASSERT(eaddr > r_saddr); 1727 ASSERT(eaddr <= r_saddr + r_size); 1728 ASSERT(vp != NULL); 1729 1730 if (!free_pages) { 1731 return; 1732 } 1733 1734 len = eaddr - saddr; 1735 off = (saddr - r_saddr) + r_objoff; 1736 free_vp_pages(vp, off, len); 1737 } 1738 1739 /* 1740 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1741 * those pages actually processed by the HAT 1742 */ 1743 static void 1744 segvn_hat_unload_callback(hat_callback_t *cb) 1745 { 1746 struct seg *seg = cb->hcb_data; 1747 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1748 size_t len; 1749 u_offset_t off; 1750 1751 ASSERT(svd->vp != NULL); 1752 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1753 ASSERT(cb->hcb_start_addr >= seg->s_base); 1754 1755 len = cb->hcb_end_addr - cb->hcb_start_addr; 1756 off = cb->hcb_start_addr - seg->s_base; 1757 free_vp_pages(svd->vp, svd->offset + off, len); 1758 } 1759 1760 /* 1761 * This function determines the number of bytes of swap reserved by 1762 * a segment for which per-page accounting is present. It is used to 1763 * calculate the correct value of a segvn_data's swresv. 
1764 */ 1765 static size_t 1766 segvn_count_swap_by_vpages(struct seg *seg) 1767 { 1768 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1769 struct vpage *vp, *evp; 1770 size_t nswappages = 0; 1771 1772 ASSERT(svd->pageswap); 1773 ASSERT(svd->vpage != NULL); 1774 1775 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 1776 1777 for (vp = svd->vpage; vp < evp; vp++) { 1778 if (VPP_ISSWAPRES(vp)) 1779 nswappages++; 1780 } 1781 1782 return (nswappages << PAGESHIFT); 1783 } 1784 1785 static int 1786 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1787 { 1788 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1789 struct segvn_data *nsvd; 1790 struct seg *nseg; 1791 struct anon_map *amp; 1792 pgcnt_t opages; /* old segment size in pages */ 1793 pgcnt_t npages; /* new segment size in pages */ 1794 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1795 hat_callback_t callback; /* used for free_vp_pages() */ 1796 hat_callback_t *cbp = NULL; 1797 caddr_t nbase; 1798 size_t nsize; 1799 size_t oswresv; 1800 int reclaim = 1; 1801 1802 /* 1803 * We don't need any segment level locks for "segvn" data 1804 * since the address space is "write" locked. 1805 */ 1806 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1807 1808 /* 1809 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1810 * softlockcnt is protected from change by the as write lock. 1811 */ 1812 retry: 1813 if (svd->softlockcnt > 0) { 1814 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1815 1816 /* 1817 * If this is shared segment non 0 softlockcnt 1818 * means locked pages are still in use. 1819 */ 1820 if (svd->type == MAP_SHARED) { 1821 return (EAGAIN); 1822 } 1823 1824 /* 1825 * since we do have the writers lock nobody can fill 1826 * the cache during the purge. The flush either succeeds 1827 * or we still have pending I/Os. 1828 */ 1829 if (reclaim == 1) { 1830 segvn_purge(seg); 1831 reclaim = 0; 1832 goto retry; 1833 } 1834 return (EAGAIN); 1835 } 1836 1837 /* 1838 * Check for bad sizes 1839 */ 1840 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1841 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1842 panic("segvn_unmap"); 1843 /*NOTREACHED*/ 1844 } 1845 1846 if (seg->s_szc != 0) { 1847 size_t pgsz = page_get_pagesize(seg->s_szc); 1848 int err; 1849 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1850 ASSERT(seg->s_base != addr || seg->s_size != len); 1851 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1852 ASSERT(svd->amp == NULL); 1853 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1854 hat_leave_region(seg->s_as->a_hat, 1855 svd->rcookie, HAT_REGION_TEXT); 1856 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1857 /* 1858 * could pass a flag to segvn_demote_range() 1859 * below to tell it not to do any unloads but 1860 * this case is rare enough to not bother for 1861 * now. 1862 */ 1863 } else if (svd->tr_state == SEGVN_TR_INIT) { 1864 svd->tr_state = SEGVN_TR_OFF; 1865 } else if (svd->tr_state == SEGVN_TR_ON) { 1866 ASSERT(svd->amp != NULL); 1867 segvn_textunrepl(seg, 1); 1868 ASSERT(svd->amp == NULL); 1869 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1870 } 1871 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1872 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1873 if (err == 0) { 1874 return (IE_RETRY); 1875 } 1876 return (err); 1877 } 1878 } 1879 1880 /* Inform the vnode of the unmapping. 
*/ 1881 if (svd->vp) { 1882 int error; 1883 1884 error = VOP_DELMAP(svd->vp, 1885 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1886 seg->s_as, addr, len, svd->prot, svd->maxprot, 1887 svd->type, svd->cred, NULL); 1888 1889 if (error == EAGAIN) 1890 return (error); 1891 } 1892 1893 /* 1894 * Remove any page locks set through this mapping. 1895 * If text replication is not off no page locks could have been 1896 * established via this mapping. 1897 */ 1898 if (svd->tr_state == SEGVN_TR_OFF) { 1899 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1900 } 1901 1902 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1903 ASSERT(svd->amp == NULL); 1904 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1905 ASSERT(svd->type == MAP_PRIVATE); 1906 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1907 HAT_REGION_TEXT); 1908 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1909 } else if (svd->tr_state == SEGVN_TR_ON) { 1910 ASSERT(svd->amp != NULL); 1911 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1912 segvn_textunrepl(seg, 1); 1913 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1914 } else { 1915 if (svd->tr_state != SEGVN_TR_OFF) { 1916 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1917 svd->tr_state = SEGVN_TR_OFF; 1918 } 1919 /* 1920 * Unload any hardware translations in the range to be taken 1921 * out. Use a callback to invoke free_vp_pages() effectively. 1922 */ 1923 if (svd->vp != NULL && free_pages != 0) { 1924 callback.hcb_data = seg; 1925 callback.hcb_function = segvn_hat_unload_callback; 1926 cbp = &callback; 1927 } 1928 hat_unload_callback(seg->s_as->a_hat, addr, len, 1929 HAT_UNLOAD_UNMAP, cbp); 1930 1931 if (svd->type == MAP_SHARED && svd->vp != NULL && 1932 (svd->vp->v_flag & VVMEXEC) && 1933 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1934 segvn_inval_trcache(svd->vp); 1935 } 1936 } 1937 1938 /* 1939 * Check for entire segment 1940 */ 1941 if (addr == seg->s_base && len == seg->s_size) { 1942 seg_free(seg); 1943 return (0); 1944 } 1945 1946 opages = seg_pages(seg); 1947 dpages = btop(len); 1948 npages = opages - dpages; 1949 amp = svd->amp; 1950 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1951 1952 /* 1953 * Check for beginning of segment 1954 */ 1955 if (addr == seg->s_base) { 1956 if (svd->vpage != NULL) { 1957 size_t nbytes; 1958 struct vpage *ovpage; 1959 1960 ovpage = svd->vpage; /* keep pointer to vpage */ 1961 1962 nbytes = vpgtob(npages); 1963 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1964 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1965 1966 /* free up old vpage */ 1967 kmem_free(ovpage, vpgtob(opages)); 1968 } 1969 if (amp != NULL) { 1970 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1971 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1972 /* 1973 * Shared anon map is no longer in use. Before 1974 * freeing its pages purge all entries from 1975 * pcache that belong to this amp. 1976 */ 1977 if (svd->type == MAP_SHARED) { 1978 ASSERT(amp->refcnt == 1); 1979 ASSERT(svd->softlockcnt == 0); 1980 anonmap_purge(amp); 1981 } 1982 /* 1983 * Free up now unused parts of anon_map array. 
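 * A minimal sketch of the bookkeeping for this case, assuming a
 * MAP_PRIVATE segment with szc == 0 that loses its first dpages
 * pages:
 *
 *	anon_free(amp->ahp, svd->anon_index, len);
 *	svd->anon_index += dpages;
 *	svd->offset += len;		(only when a vnode is mapped)
 *	seg->s_base += len;  seg->s_size -= len;
 *
 * followed by the swap accounting adjustments at the end of the
 * block.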
1984 */ 1985 if (amp->a_szc == seg->s_szc) { 1986 if (seg->s_szc != 0) { 1987 anon_free_pages(amp->ahp, 1988 svd->anon_index, len, 1989 seg->s_szc); 1990 } else { 1991 anon_free(amp->ahp, 1992 svd->anon_index, 1993 len); 1994 } 1995 } else { 1996 ASSERT(svd->type == MAP_SHARED); 1997 ASSERT(amp->a_szc > seg->s_szc); 1998 anon_shmap_free_pages(amp, 1999 svd->anon_index, len); 2000 } 2001 2002 /* 2003 * Unreserve swap space for the 2004 * unmapped chunk of this segment in 2005 * case it's MAP_SHARED 2006 */ 2007 if (svd->type == MAP_SHARED) { 2008 anon_unresv(len); 2009 amp->swresv -= len; 2010 } 2011 } 2012 ANON_LOCK_EXIT(&->a_rwlock); 2013 svd->anon_index += dpages; 2014 } 2015 if (svd->vp != NULL) 2016 svd->offset += len; 2017 2018 seg->s_base += len; 2019 seg->s_size -= len; 2020 2021 if (svd->swresv) { 2022 if (svd->flags & MAP_NORESERVE) { 2023 ASSERT(amp); 2024 oswresv = svd->swresv; 2025 2026 svd->swresv = ptob(anon_pages(amp->ahp, 2027 svd->anon_index, npages)); 2028 anon_unresv(oswresv - svd->swresv); 2029 } else { 2030 size_t unlen; 2031 2032 if (svd->pageswap) { 2033 oswresv = svd->swresv; 2034 svd->swresv = 2035 segvn_count_swap_by_vpages(seg); 2036 ASSERT(oswresv >= svd->swresv); 2037 unlen = oswresv - svd->swresv; 2038 } else { 2039 svd->swresv -= len; 2040 ASSERT(svd->swresv == seg->s_size); 2041 unlen = len; 2042 } 2043 anon_unresv(unlen); 2044 } 2045 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2046 seg, len, 0); 2047 } 2048 2049 return (0); 2050 } 2051 2052 /* 2053 * Check for end of segment 2054 */ 2055 if (addr + len == seg->s_base + seg->s_size) { 2056 if (svd->vpage != NULL) { 2057 size_t nbytes; 2058 struct vpage *ovpage; 2059 2060 ovpage = svd->vpage; /* keep pointer to vpage */ 2061 2062 nbytes = vpgtob(npages); 2063 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2064 bcopy(ovpage, svd->vpage, nbytes); 2065 2066 /* free up old vpage */ 2067 kmem_free(ovpage, vpgtob(opages)); 2068 2069 } 2070 if (amp != NULL) { 2071 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2072 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2073 /* 2074 * Free up now unused parts of anon_map array. 2075 */ 2076 ulong_t an_idx = svd->anon_index + npages; 2077 2078 /* 2079 * Shared anon map is no longer in use. Before 2080 * freeing its pages purge all entries from 2081 * pcache that belong to this amp. 
2082 */ 2083 if (svd->type == MAP_SHARED) { 2084 ASSERT(amp->refcnt == 1); 2085 ASSERT(svd->softlockcnt == 0); 2086 anonmap_purge(amp); 2087 } 2088 2089 if (amp->a_szc == seg->s_szc) { 2090 if (seg->s_szc != 0) { 2091 anon_free_pages(amp->ahp, 2092 an_idx, len, 2093 seg->s_szc); 2094 } else { 2095 anon_free(amp->ahp, an_idx, 2096 len); 2097 } 2098 } else { 2099 ASSERT(svd->type == MAP_SHARED); 2100 ASSERT(amp->a_szc > seg->s_szc); 2101 anon_shmap_free_pages(amp, 2102 an_idx, len); 2103 } 2104 2105 /* 2106 * Unreserve swap space for the 2107 * unmapped chunk of this segment in 2108 * case it's MAP_SHARED 2109 */ 2110 if (svd->type == MAP_SHARED) { 2111 anon_unresv(len); 2112 amp->swresv -= len; 2113 } 2114 } 2115 ANON_LOCK_EXIT(&->a_rwlock); 2116 } 2117 2118 seg->s_size -= len; 2119 2120 if (svd->swresv) { 2121 if (svd->flags & MAP_NORESERVE) { 2122 ASSERT(amp); 2123 oswresv = svd->swresv; 2124 svd->swresv = ptob(anon_pages(amp->ahp, 2125 svd->anon_index, npages)); 2126 anon_unresv(oswresv - svd->swresv); 2127 } else { 2128 size_t unlen; 2129 2130 if (svd->pageswap) { 2131 oswresv = svd->swresv; 2132 svd->swresv = 2133 segvn_count_swap_by_vpages(seg); 2134 ASSERT(oswresv >= svd->swresv); 2135 unlen = oswresv - svd->swresv; 2136 } else { 2137 svd->swresv -= len; 2138 ASSERT(svd->swresv == seg->s_size); 2139 unlen = len; 2140 } 2141 anon_unresv(unlen); 2142 } 2143 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2144 "anon proc:%p %lu %u", seg, len, 0); 2145 } 2146 2147 return (0); 2148 } 2149 2150 /* 2151 * The section to go is in the middle of the segment, 2152 * have to make it into two segments. nseg is made for 2153 * the high end while seg is cut down at the low end. 2154 */ 2155 nbase = addr + len; /* new seg base */ 2156 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2157 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2158 nseg = seg_alloc(seg->s_as, nbase, nsize); 2159 if (nseg == NULL) { 2160 panic("segvn_unmap seg_alloc"); 2161 /*NOTREACHED*/ 2162 } 2163 nseg->s_ops = seg->s_ops; 2164 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2165 nseg->s_data = (void *)nsvd; 2166 nseg->s_szc = seg->s_szc; 2167 *nsvd = *svd; 2168 nsvd->seg = nseg; 2169 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2170 nsvd->swresv = 0; 2171 nsvd->softlockcnt = 0; 2172 nsvd->softlockcnt_sbase = 0; 2173 nsvd->softlockcnt_send = 0; 2174 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2175 2176 if (svd->vp != NULL) { 2177 VN_HOLD(nsvd->vp); 2178 if (nsvd->type == MAP_SHARED) 2179 lgrp_shm_policy_init(NULL, nsvd->vp); 2180 } 2181 crhold(svd->cred); 2182 2183 if (svd->vpage == NULL) { 2184 nsvd->vpage = NULL; 2185 } else { 2186 /* need to split vpage into two arrays */ 2187 size_t nbytes; 2188 struct vpage *ovpage; 2189 2190 ovpage = svd->vpage; /* keep pointer to vpage */ 2191 2192 npages = seg_pages(seg); /* seg has shrunk */ 2193 nbytes = vpgtob(npages); 2194 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2195 2196 bcopy(ovpage, svd->vpage, nbytes); 2197 2198 npages = seg_pages(nseg); 2199 nbytes = vpgtob(npages); 2200 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2201 2202 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2203 2204 /* free up old vpage */ 2205 kmem_free(ovpage, vpgtob(opages)); 2206 } 2207 2208 if (amp == NULL) { 2209 nsvd->amp = NULL; 2210 nsvd->anon_index = 0; 2211 } else { 2212 /* 2213 * Need to create a new anon map for the new segment. 2214 * We'll also allocate a new smaller array for the old 2215 * smaller segment to save space. 
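 * As a hypothetical example, unmapping pages [2, 4) of a ten page
 * private segment leaves "seg" covering pages [0, 2) and the new
 * "nseg" covering pages [4, 10): the anon slots backing the two
 * unmapped pages are freed here, and the surviving slots are copied
 * into a new, smaller anon_hdr for seg and into a freshly allocated
 * anon_map for nseg.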
2216 */ 2217 opages = btop((uintptr_t)(addr - seg->s_base)); 2218 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2219 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2220 /* 2221 * Free up now unused parts of anon_map array. 2222 */ 2223 ulong_t an_idx = svd->anon_index + opages; 2224 2225 /* 2226 * Shared anon map is no longer in use. Before 2227 * freeing its pages purge all entries from 2228 * pcache that belong to this amp. 2229 */ 2230 if (svd->type == MAP_SHARED) { 2231 ASSERT(amp->refcnt == 1); 2232 ASSERT(svd->softlockcnt == 0); 2233 anonmap_purge(amp); 2234 } 2235 2236 if (amp->a_szc == seg->s_szc) { 2237 if (seg->s_szc != 0) { 2238 anon_free_pages(amp->ahp, an_idx, len, 2239 seg->s_szc); 2240 } else { 2241 anon_free(amp->ahp, an_idx, 2242 len); 2243 } 2244 } else { 2245 ASSERT(svd->type == MAP_SHARED); 2246 ASSERT(amp->a_szc > seg->s_szc); 2247 anon_shmap_free_pages(amp, an_idx, len); 2248 } 2249 2250 /* 2251 * Unreserve swap space for the 2252 * unmapped chunk of this segment in 2253 * case it's MAP_SHARED 2254 */ 2255 if (svd->type == MAP_SHARED) { 2256 anon_unresv(len); 2257 amp->swresv -= len; 2258 } 2259 } 2260 nsvd->anon_index = svd->anon_index + 2261 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2262 if (svd->type == MAP_SHARED) { 2263 amp->refcnt++; 2264 nsvd->amp = amp; 2265 } else { 2266 struct anon_map *namp; 2267 struct anon_hdr *nahp; 2268 2269 ASSERT(svd->type == MAP_PRIVATE); 2270 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2271 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2272 namp->a_szc = seg->s_szc; 2273 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2274 0, btop(seg->s_size), ANON_SLEEP); 2275 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2276 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2277 anon_release(amp->ahp, btop(amp->size)); 2278 svd->anon_index = 0; 2279 nsvd->anon_index = 0; 2280 amp->ahp = nahp; 2281 amp->size = seg->s_size; 2282 nsvd->amp = namp; 2283 } 2284 ANON_LOCK_EXIT(&->a_rwlock); 2285 } 2286 if (svd->swresv) { 2287 if (svd->flags & MAP_NORESERVE) { 2288 ASSERT(amp); 2289 oswresv = svd->swresv; 2290 svd->swresv = ptob(anon_pages(amp->ahp, 2291 svd->anon_index, btop(seg->s_size))); 2292 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2293 nsvd->anon_index, btop(nseg->s_size))); 2294 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2295 anon_unresv(oswresv - (svd->swresv + nsvd->swresv)); 2296 } else { 2297 size_t unlen; 2298 2299 if (svd->pageswap) { 2300 oswresv = svd->swresv; 2301 svd->swresv = segvn_count_swap_by_vpages(seg); 2302 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2303 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2304 unlen = oswresv - (svd->swresv + nsvd->swresv); 2305 } else { 2306 if (seg->s_size + nseg->s_size + len != 2307 svd->swresv) { 2308 panic("segvn_unmap: cannot split " 2309 "swap reservation"); 2310 /*NOTREACHED*/ 2311 } 2312 svd->swresv = seg->s_size; 2313 nsvd->swresv = nseg->s_size; 2314 unlen = len; 2315 } 2316 anon_unresv(unlen); 2317 } 2318 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2319 seg, len, 0); 2320 } 2321 2322 return (0); /* I'm glad that's all over with! */ 2323 } 2324 2325 static void 2326 segvn_free(struct seg *seg) 2327 { 2328 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2329 pgcnt_t npages = seg_pages(seg); 2330 struct anon_map *amp; 2331 size_t len; 2332 2333 /* 2334 * We don't need any segment level locks for "segvn" data 2335 * since the address space is "write" locked. 
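 * (segvn_free() is only reached through seg_free(), typically from
 * segvn_unmap() when the entire segment goes away or when the whole
 * address space is being torn down; in either case the caller holds
 * the address space lock as writer, which the ASSERT below verifies.)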
2336 */ 2337 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2338 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2339 2340 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2341 2342 /* 2343 * Be sure to unlock pages. XXX Why do things get free'ed instead 2344 * of unmapped? XXX 2345 */ 2346 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2347 0, MC_UNLOCK, NULL, 0); 2348 2349 /* 2350 * Deallocate the vpage and anon pointers if necessary and possible. 2351 */ 2352 if (svd->vpage != NULL) { 2353 kmem_free(svd->vpage, vpgtob(npages)); 2354 svd->vpage = NULL; 2355 } 2356 if ((amp = svd->amp) != NULL) { 2357 /* 2358 * If there are no more references to this anon_map 2359 * structure, then deallocate the structure after freeing 2360 * up all the anon slot pointers that we can. 2361 */ 2362 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2363 ASSERT(amp->a_szc >= seg->s_szc); 2364 if (--amp->refcnt == 0) { 2365 if (svd->type == MAP_PRIVATE) { 2366 /* 2367 * Private - we only need to anon_free 2368 * the part that this segment refers to. 2369 */ 2370 if (seg->s_szc != 0) { 2371 anon_free_pages(amp->ahp, 2372 svd->anon_index, seg->s_size, 2373 seg->s_szc); 2374 } else { 2375 anon_free(amp->ahp, svd->anon_index, 2376 seg->s_size); 2377 } 2378 } else { 2379 2380 /* 2381 * Shared anon map is no longer in use. Before 2382 * freeing its pages purge all entries from 2383 * pcache that belong to this amp. 2384 */ 2385 ASSERT(svd->softlockcnt == 0); 2386 anonmap_purge(amp); 2387 2388 /* 2389 * Shared - anon_free the entire 2390 * anon_map's worth of stuff and 2391 * release any swap reservation. 2392 */ 2393 if (amp->a_szc != 0) { 2394 anon_shmap_free_pages(amp, 0, 2395 amp->size); 2396 } else { 2397 anon_free(amp->ahp, 0, amp->size); 2398 } 2399 if ((len = amp->swresv) != 0) { 2400 anon_unresv(len); 2401 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2402 "anon proc:%p %lu %u", seg, len, 0); 2403 } 2404 } 2405 svd->amp = NULL; 2406 ANON_LOCK_EXIT(&->a_rwlock); 2407 anonmap_free(amp); 2408 } else if (svd->type == MAP_PRIVATE) { 2409 /* 2410 * We had a private mapping which still has 2411 * a held anon_map so just free up all the 2412 * anon slot pointers that we were using. 2413 */ 2414 if (seg->s_szc != 0) { 2415 anon_free_pages(amp->ahp, svd->anon_index, 2416 seg->s_size, seg->s_szc); 2417 } else { 2418 anon_free(amp->ahp, svd->anon_index, 2419 seg->s_size); 2420 } 2421 ANON_LOCK_EXIT(&->a_rwlock); 2422 } else { 2423 ANON_LOCK_EXIT(&->a_rwlock); 2424 } 2425 } 2426 2427 /* 2428 * Release swap reservation. 2429 */ 2430 if ((len = svd->swresv) != 0) { 2431 anon_unresv(svd->swresv); 2432 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2433 seg, len, 0); 2434 svd->swresv = 0; 2435 } 2436 /* 2437 * Release claim on vnode, credentials, and finally free the 2438 * private data. 2439 */ 2440 if (svd->vp != NULL) { 2441 if (svd->type == MAP_SHARED) 2442 lgrp_shm_policy_fini(NULL, svd->vp); 2443 VN_RELE(svd->vp); 2444 svd->vp = NULL; 2445 } 2446 crfree(svd->cred); 2447 svd->pageprot = 0; 2448 svd->pageadvice = 0; 2449 svd->pageswap = 0; 2450 svd->cred = NULL; 2451 2452 /* 2453 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2454 * still working with this segment without holding as lock (in case 2455 * it's called by pcache async thread). 
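 * The mutex_enter()/mutex_exit() pair below is intentionally empty;
 * it acts purely as a barrier that waits for any such in-flight
 * segvn_reclaim() caller to drop segfree_syncmtx before the svd is
 * handed back to the kmem cache.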
2456 */ 2457 ASSERT(svd->softlockcnt == 0); 2458 mutex_enter(&svd->segfree_syncmtx); 2459 mutex_exit(&svd->segfree_syncmtx); 2460 2461 seg->s_data = NULL; 2462 kmem_cache_free(segvn_cache, svd); 2463 } 2464 2465 /* 2466 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2467 * already been F_SOFTLOCK'ed. 2468 * Caller must always match addr and len of a softunlock with a previous 2469 * softlock with exactly the same addr and len. 2470 */ 2471 static void 2472 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2473 { 2474 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2475 page_t *pp; 2476 caddr_t adr; 2477 struct vnode *vp; 2478 u_offset_t offset; 2479 ulong_t anon_index; 2480 struct anon_map *amp; 2481 struct anon *ap = NULL; 2482 2483 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2484 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2485 2486 if ((amp = svd->amp) != NULL) 2487 anon_index = svd->anon_index + seg_page(seg, addr); 2488 2489 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2490 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2491 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2492 } else { 2493 hat_unlock(seg->s_as->a_hat, addr, len); 2494 } 2495 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2496 if (amp != NULL) { 2497 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2498 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2499 != NULL) { 2500 swap_xlate(ap, &vp, &offset); 2501 } else { 2502 vp = svd->vp; 2503 offset = svd->offset + 2504 (uintptr_t)(adr - seg->s_base); 2505 } 2506 ANON_LOCK_EXIT(&->a_rwlock); 2507 } else { 2508 vp = svd->vp; 2509 offset = svd->offset + 2510 (uintptr_t)(adr - seg->s_base); 2511 } 2512 2513 /* 2514 * Use page_find() instead of page_lookup() to 2515 * find the page since we know that it is locked. 2516 */ 2517 pp = page_find(vp, offset); 2518 if (pp == NULL) { 2519 panic( 2520 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2521 (void *)adr, (void *)ap, (void *)vp, offset); 2522 /*NOTREACHED*/ 2523 } 2524 2525 if (rw == S_WRITE) { 2526 hat_setrefmod(pp); 2527 if (seg->s_as->a_vbits) 2528 hat_setstat(seg->s_as, adr, PAGESIZE, 2529 P_REF | P_MOD); 2530 } else if (rw != S_OTHER) { 2531 hat_setref(pp); 2532 if (seg->s_as->a_vbits) 2533 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2534 } 2535 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2536 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2537 page_unlock(pp); 2538 } 2539 ASSERT(svd->softlockcnt >= btop(len)); 2540 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2541 /* 2542 * All SOFTLOCKS are gone. Wakeup any waiting 2543 * unmappers so they can try again to unmap. 2544 * Check for waiters first without the mutex 2545 * held so we don't always grab the mutex on 2546 * softunlocks. 2547 */ 2548 if (AS_ISUNMAPWAIT(seg->s_as)) { 2549 mutex_enter(&seg->s_as->a_contents); 2550 if (AS_ISUNMAPWAIT(seg->s_as)) { 2551 AS_CLRUNMAPWAIT(seg->s_as); 2552 cv_broadcast(&seg->s_as->a_cv); 2553 } 2554 mutex_exit(&seg->s_as->a_contents); 2555 } 2556 } 2557 } 2558 2559 #define PAGE_HANDLED ((page_t *)-1) 2560 2561 /* 2562 * Release all the pages in the NULL terminated ppp list 2563 * which haven't already been converted to PAGE_HANDLED. 2564 */ 2565 static void 2566 segvn_pagelist_rele(page_t **ppp) 2567 { 2568 for (; *ppp != NULL; ppp++) { 2569 if (*ppp != PAGE_HANDLED) 2570 page_unlock(*ppp); 2571 } 2572 } 2573 2574 static int stealcow = 1; 2575 2576 /* 2577 * Workaround for viking chip bug. See bug id 1220902. 
2578 * To fix this down in pagefault() would require importing so 2579 * much as and segvn code as to be unmaintainable. 2580 */ 2581 int enable_mbit_wa = 0; 2582 2583 /* 2584 * Handles all the dirty work of getting the right 2585 * anonymous pages and loading up the translations. 2586 * This routine is called only from segvn_fault() 2587 * when looping over the range of addresses requested. 2588 * 2589 * The basic algorithm here is: 2590 * If this is an anon_zero case 2591 * Call anon_zero to allocate page 2592 * Load up translation 2593 * Return 2594 * endif 2595 * If this is an anon page 2596 * Use anon_getpage to get the page 2597 * else 2598 * Find page in pl[] list passed in 2599 * endif 2600 * If not a cow 2601 * Load up the translation to the page 2602 * return 2603 * endif 2604 * Call anon_private to handle cow 2605 * Load up (writable) translation to new page 2606 */ 2607 static faultcode_t 2608 segvn_faultpage( 2609 struct hat *hat, /* the hat to use for mapping */ 2610 struct seg *seg, /* seg_vn of interest */ 2611 caddr_t addr, /* address in as */ 2612 u_offset_t off, /* offset in vp */ 2613 struct vpage *vpage, /* pointer to vpage for vp, off */ 2614 page_t *pl[], /* object source page pointer */ 2615 uint_t vpprot, /* access allowed to object pages */ 2616 enum fault_type type, /* type of fault */ 2617 enum seg_rw rw, /* type of access at fault */ 2618 int brkcow) /* we may need to break cow */ 2619 { 2620 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2621 page_t *pp, **ppp; 2622 uint_t pageflags = 0; 2623 page_t *anon_pl[1 + 1]; 2624 page_t *opp = NULL; /* original page */ 2625 uint_t prot; 2626 int err; 2627 int cow; 2628 int claim; 2629 int steal = 0; 2630 ulong_t anon_index; 2631 struct anon *ap, *oldap; 2632 struct anon_map *amp; 2633 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2634 int anon_lock = 0; 2635 anon_sync_obj_t cookie; 2636 2637 if (svd->flags & MAP_TEXT) { 2638 hat_flag |= HAT_LOAD_TEXT; 2639 } 2640 2641 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2642 ASSERT(seg->s_szc == 0); 2643 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2644 2645 /* 2646 * Initialize protection value for this page. 2647 * If we have per page protection values check it now. 2648 */ 2649 if (svd->pageprot) { 2650 uint_t protchk; 2651 2652 switch (rw) { 2653 case S_READ: 2654 protchk = PROT_READ; 2655 break; 2656 case S_WRITE: 2657 protchk = PROT_WRITE; 2658 break; 2659 case S_EXEC: 2660 protchk = PROT_EXEC; 2661 break; 2662 case S_OTHER: 2663 default: 2664 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2665 break; 2666 } 2667 2668 prot = VPP_PROT(vpage); 2669 if ((prot & protchk) == 0) 2670 return (FC_PROT); /* illegal access type */ 2671 } else { 2672 prot = svd->prot; 2673 } 2674 2675 if (type == F_SOFTLOCK) { 2676 atomic_add_long((ulong_t *)&svd->softlockcnt, 1); 2677 } 2678 2679 /* 2680 * Always acquire the anon array lock to prevent 2 threads from 2681 * allocating separate anon slots for the same "addr". 2682 */ 2683 2684 if ((amp = svd->amp) != NULL) { 2685 ASSERT(RW_READ_HELD(&->a_rwlock)); 2686 anon_index = svd->anon_index + seg_page(seg, addr); 2687 anon_array_enter(amp, anon_index, &cookie); 2688 anon_lock = 1; 2689 } 2690 2691 if (svd->vp == NULL && amp != NULL) { 2692 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2693 /* 2694 * Allocate a (normally) writable anonymous page of 2695 * zeroes. If no advance reservations, reserve now. 
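 * For MAP_NORESERVE mappings swap is reserved lazily, one page per
 * first-touch fault: anon_resv_zone(ptob(1), ...) below reserves a
 * single page of swap and svd->swresv is bumped by ptob(1), so the
 * reservation only ever covers pages that have actually been
 * touched.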
2696 */ 2697 if (svd->flags & MAP_NORESERVE) { 2698 if (anon_resv_zone(ptob(1), 2699 seg->s_as->a_proc->p_zone)) { 2700 atomic_add_long(&svd->swresv, ptob(1)); 2701 } else { 2702 err = ENOMEM; 2703 goto out; 2704 } 2705 } 2706 if ((pp = anon_zero(seg, addr, &ap, 2707 svd->cred)) == NULL) { 2708 err = ENOMEM; 2709 goto out; /* out of swap space */ 2710 } 2711 /* 2712 * Re-acquire the anon_map lock and 2713 * initialize the anon array entry. 2714 */ 2715 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2716 ANON_SLEEP); 2717 2718 ASSERT(pp->p_szc == 0); 2719 2720 /* 2721 * Handle pages that have been marked for migration 2722 */ 2723 if (lgrp_optimizations()) 2724 page_migrate(seg, addr, &pp, 1); 2725 2726 if (enable_mbit_wa) { 2727 if (rw == S_WRITE) 2728 hat_setmod(pp); 2729 else if (!hat_ismod(pp)) 2730 prot &= ~PROT_WRITE; 2731 } 2732 /* 2733 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2734 * with MC_LOCKAS, MCL_FUTURE) and this is a 2735 * MAP_NORESERVE segment, we may need to 2736 * permanently lock the page as it is being faulted 2737 * for the first time. The following text applies 2738 * only to MAP_NORESERVE segments: 2739 * 2740 * As per memcntl(2), if this segment was created 2741 * after MCL_FUTURE was applied (a "future" 2742 * segment), its pages must be locked. If this 2743 * segment existed at MCL_FUTURE application (a 2744 * "past" segment), the interface is unclear. 2745 * 2746 * We decide to lock only if vpage is present: 2747 * 2748 * - "future" segments will have a vpage array (see 2749 * as_map), and so will be locked as required 2750 * 2751 * - "past" segments may not have a vpage array, 2752 * depending on whether events (such as 2753 * mprotect) have occurred. Locking if vpage 2754 * exists will preserve legacy behavior. Not 2755 * locking if vpage is absent, will not break 2756 * the interface or legacy behavior. Note that 2757 * allocating vpage here if it's absent requires 2758 * upgrading the segvn reader lock, the cost of 2759 * which does not seem worthwhile. 2760 * 2761 * Usually testing and setting VPP_ISPPLOCK and 2762 * VPP_SETPPLOCK requires holding the segvn lock as 2763 * writer, but in this case all readers are 2764 * serializing on the anon array lock. 2765 */ 2766 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2767 (svd->flags & MAP_NORESERVE) && 2768 !VPP_ISPPLOCK(vpage)) { 2769 proc_t *p = seg->s_as->a_proc; 2770 ASSERT(svd->type == MAP_PRIVATE); 2771 mutex_enter(&p->p_lock); 2772 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2773 1) == 0) { 2774 claim = VPP_PROT(vpage) & PROT_WRITE; 2775 if (page_pp_lock(pp, claim, 0)) { 2776 VPP_SETPPLOCK(vpage); 2777 } else { 2778 rctl_decr_locked_mem(p, NULL, 2779 PAGESIZE, 1); 2780 } 2781 } 2782 mutex_exit(&p->p_lock); 2783 } 2784 2785 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2786 hat_memload(hat, addr, pp, prot, hat_flag); 2787 2788 if (!(hat_flag & HAT_LOAD_LOCK)) 2789 page_unlock(pp); 2790 2791 anon_array_exit(&cookie); 2792 return (0); 2793 } 2794 } 2795 2796 /* 2797 * Obtain the page structure via anon_getpage() if it is 2798 * a private copy of an object (the result of a previous 2799 * copy-on-write). 2800 */ 2801 if (amp != NULL) { 2802 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2803 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2804 seg, addr, rw, svd->cred); 2805 if (err) 2806 goto out; 2807 2808 if (svd->type == MAP_SHARED) { 2809 /* 2810 * If this is a shared mapping to an 2811 * anon_map, then ignore the write 2812 * permissions returned by anon_getpage(). 
2813 * They apply to the private mappings 2814 * of this anon_map. 2815 */ 2816 vpprot |= PROT_WRITE; 2817 } 2818 opp = anon_pl[0]; 2819 } 2820 } 2821 2822 /* 2823 * Search the pl[] list passed in if it is from the 2824 * original object (i.e., not a private copy). 2825 */ 2826 if (opp == NULL) { 2827 /* 2828 * Find original page. We must be bringing it in 2829 * from the list in pl[]. 2830 */ 2831 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2832 if (opp == PAGE_HANDLED) 2833 continue; 2834 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2835 if (opp->p_offset == off) 2836 break; 2837 } 2838 if (opp == NULL) { 2839 panic("segvn_faultpage not found"); 2840 /*NOTREACHED*/ 2841 } 2842 *ppp = PAGE_HANDLED; 2843 2844 } 2845 2846 ASSERT(PAGE_LOCKED(opp)); 2847 2848 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2849 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2850 2851 /* 2852 * The fault is treated as a copy-on-write fault if a 2853 * write occurs on a private segment and the object 2854 * page (i.e., mapping) is write protected. We assume 2855 * that fatal protection checks have already been made. 2856 */ 2857 2858 if (brkcow) { 2859 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2860 cow = !(vpprot & PROT_WRITE); 2861 } else if (svd->tr_state == SEGVN_TR_ON) { 2862 /* 2863 * If we are doing text replication COW on first touch. 2864 */ 2865 ASSERT(amp != NULL); 2866 ASSERT(svd->vp != NULL); 2867 ASSERT(rw != S_WRITE); 2868 cow = (ap == NULL); 2869 } else { 2870 cow = 0; 2871 } 2872 2873 /* 2874 * If not a copy-on-write case load the translation 2875 * and return. 2876 */ 2877 if (cow == 0) { 2878 2879 /* 2880 * Handle pages that have been marked for migration 2881 */ 2882 if (lgrp_optimizations()) 2883 page_migrate(seg, addr, &opp, 1); 2884 2885 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2886 if (rw == S_WRITE) 2887 hat_setmod(opp); 2888 else if (rw != S_OTHER && !hat_ismod(opp)) 2889 prot &= ~PROT_WRITE; 2890 } 2891 2892 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2893 (!svd->pageprot && svd->prot == (prot & vpprot))); 2894 ASSERT(amp == NULL || 2895 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2896 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2897 svd->rcookie); 2898 2899 if (!(hat_flag & HAT_LOAD_LOCK)) 2900 page_unlock(opp); 2901 2902 if (anon_lock) { 2903 anon_array_exit(&cookie); 2904 } 2905 return (0); 2906 } 2907 2908 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2909 2910 hat_setref(opp); 2911 2912 ASSERT(amp != NULL && anon_lock); 2913 2914 /* 2915 * Steal the page only if it isn't a private page 2916 * since stealing a private page is not worth the effort. 2917 */ 2918 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2919 steal = 1; 2920 2921 /* 2922 * Steal the original page if the following conditions are true: 2923 * 2924 * We are low on memory, the page is not private, page is not large, 2925 * not shared, not modified, not `locked' or if we have it `locked' 2926 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2927 * that the page is not shared) and if it doesn't have any 2928 * translations. page_struct_lock isn't needed to look at p_cowcnt 2929 * and p_lckcnt because we first get exclusive lock on page. 
2930 */ 2931 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2932 2933 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2934 page_tryupgrade(opp) && !hat_ismod(opp) && 2935 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2936 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2937 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2938 /* 2939 * Check if this page has other translations 2940 * after unloading our translation. 2941 */ 2942 if (hat_page_is_mapped(opp)) { 2943 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2944 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 2945 HAT_UNLOAD); 2946 } 2947 2948 /* 2949 * hat_unload() might sync back someone else's recent 2950 * modification, so check again. 2951 */ 2952 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 2953 pageflags |= STEAL_PAGE; 2954 } 2955 2956 /* 2957 * If we have a vpage pointer, see if it indicates that we have 2958 * ``locked'' the page we map -- if so, tell anon_private to 2959 * transfer the locking resource to the new page. 2960 * 2961 * See Statement at the beginning of segvn_lockop regarding 2962 * the way lockcnts/cowcnts are handled during COW. 2963 * 2964 */ 2965 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 2966 pageflags |= LOCK_PAGE; 2967 2968 /* 2969 * Allocate a private page and perform the copy. 2970 * For MAP_NORESERVE reserve swap space now, unless this 2971 * is a cow fault on an existing anon page in which case 2972 * MAP_NORESERVE will have made advance reservations. 2973 */ 2974 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 2975 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 2976 atomic_add_long(&svd->swresv, ptob(1)); 2977 } else { 2978 page_unlock(opp); 2979 err = ENOMEM; 2980 goto out; 2981 } 2982 } 2983 oldap = ap; 2984 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 2985 if (pp == NULL) { 2986 err = ENOMEM; /* out of swap space */ 2987 goto out; 2988 } 2989 2990 /* 2991 * If we copied away from an anonymous page, then 2992 * we are one step closer to freeing up an anon slot. 2993 * 2994 * NOTE: The original anon slot must be released while 2995 * holding the "anon_map" lock. This is necessary to prevent 2996 * other threads from obtaining a pointer to the anon slot 2997 * which may be freed if its "refcnt" is 1. 2998 */ 2999 if (oldap != NULL) 3000 anon_decref(oldap); 3001 3002 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3003 3004 /* 3005 * Handle pages that have been marked for migration 3006 */ 3007 if (lgrp_optimizations()) 3008 page_migrate(seg, addr, &pp, 1); 3009 3010 ASSERT(pp->p_szc == 0); 3011 3012 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3013 if (enable_mbit_wa) { 3014 if (rw == S_WRITE) 3015 hat_setmod(pp); 3016 else if (!hat_ismod(pp)) 3017 prot &= ~PROT_WRITE; 3018 } 3019 3020 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3021 hat_memload(hat, addr, pp, prot, hat_flag); 3022 3023 if (!(hat_flag & HAT_LOAD_LOCK)) 3024 page_unlock(pp); 3025 3026 ASSERT(anon_lock); 3027 anon_array_exit(&cookie); 3028 return (0); 3029 out: 3030 if (anon_lock) 3031 anon_array_exit(&cookie); 3032 3033 if (type == F_SOFTLOCK) { 3034 atomic_add_long((ulong_t *)&svd->softlockcnt, -1); 3035 } 3036 return (FC_MAKE_ERR(err)); 3037 } 3038 3039 /* 3040 * relocate a bunch of smaller targ pages into one large repl page. all targ 3041 * pages must be complete pages smaller than replacement pages. 3042 * it's assumed that no page's szc can change since they are all PAGESIZE or 3043 * complete large pages locked SHARED. 
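 *
 * A hypothetical example on hardware with 8K base pages and a 64K
 * large page size: "replacement" is a single 64K page made up of
 * eight 8K constituents and targ[] holds eight independent 8K pages.
 * Each targ page is relocated onto its corresponding constituent, and
 * on return targ[] points at the constituents of the large page, all
 * downgraded to shared locks.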
3044 */ 3045 static void 3046 segvn_relocate_pages(page_t **targ, page_t *replacement) 3047 { 3048 page_t *pp; 3049 pgcnt_t repl_npgs, curnpgs; 3050 pgcnt_t i; 3051 uint_t repl_szc = replacement->p_szc; 3052 page_t *first_repl = replacement; 3053 page_t *repl; 3054 spgcnt_t npgs; 3055 3056 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3057 3058 ASSERT(repl_szc != 0); 3059 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3060 3061 i = 0; 3062 while (repl_npgs) { 3063 spgcnt_t nreloc; 3064 int err; 3065 ASSERT(replacement != NULL); 3066 pp = targ[i]; 3067 ASSERT(pp->p_szc < repl_szc); 3068 ASSERT(PAGE_EXCL(pp)); 3069 ASSERT(!PP_ISFREE(pp)); 3070 curnpgs = page_get_pagecnt(pp->p_szc); 3071 if (curnpgs == 1) { 3072 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3073 repl = replacement; 3074 page_sub(&replacement, repl); 3075 ASSERT(PAGE_EXCL(repl)); 3076 ASSERT(!PP_ISFREE(repl)); 3077 ASSERT(repl->p_szc == repl_szc); 3078 } else { 3079 page_t *repl_savepp; 3080 int j; 3081 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3082 repl_savepp = replacement; 3083 for (j = 0; j < curnpgs; j++) { 3084 repl = replacement; 3085 page_sub(&replacement, repl); 3086 ASSERT(PAGE_EXCL(repl)); 3087 ASSERT(!PP_ISFREE(repl)); 3088 ASSERT(repl->p_szc == repl_szc); 3089 ASSERT(page_pptonum(targ[i + j]) == 3090 page_pptonum(targ[i]) + j); 3091 } 3092 repl = repl_savepp; 3093 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3094 } 3095 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3096 if (err || nreloc != curnpgs) { 3097 panic("segvn_relocate_pages: " 3098 "page_relocate failed err=%d curnpgs=%ld " 3099 "nreloc=%ld", err, curnpgs, nreloc); 3100 } 3101 ASSERT(curnpgs <= repl_npgs); 3102 repl_npgs -= curnpgs; 3103 i += curnpgs; 3104 } 3105 ASSERT(replacement == NULL); 3106 3107 repl = first_repl; 3108 repl_npgs = npgs; 3109 for (i = 0; i < repl_npgs; i++) { 3110 ASSERT(PAGE_EXCL(repl)); 3111 ASSERT(!PP_ISFREE(repl)); 3112 targ[i] = repl; 3113 page_downgrade(targ[i]); 3114 repl++; 3115 } 3116 } 3117 3118 /* 3119 * Check if all pages in ppa array are complete smaller than szc pages and 3120 * their roots will still be aligned relative to their current size if the 3121 * entire ppa array is relocated into one szc page. If these conditions are 3122 * not met return 0. 3123 * 3124 * If all pages are properly aligned attempt to upgrade their locks 3125 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3126 * upgrdfail was set to 0 by caller. 3127 * 3128 * Return 1 if all pages are aligned and locked exclusively. 3129 * 3130 * If all pages in ppa array happen to be physically contiguous to make one 3131 * szc page and all exclusive locks are successfully obtained promote the page 3132 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
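 *
 * Roughly, the possible outcomes are:
 *	return 0, *upgrdfail == 0: the pages are not suitably
 *	    sized/aligned;
 *	return 0, *upgrdfail == 1: alignment is fine but an exclusive
 *	    lock upgrade failed (*pszc reports that page's size);
 *	return 1, pages locked EXCL: aligned and upgraded but not
 *	    physically contiguous;
 *	return 1, pages locked SHARED: physically contiguous, p_szc
 *	    promoted to szc and *pszc set to szc.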
3133 */ 3134 static int 3135 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3136 { 3137 page_t *pp; 3138 pfn_t pfn; 3139 pgcnt_t totnpgs = page_get_pagecnt(szc); 3140 pfn_t first_pfn; 3141 int contig = 1; 3142 pgcnt_t i; 3143 pgcnt_t j; 3144 uint_t curszc; 3145 pgcnt_t curnpgs; 3146 int root = 0; 3147 3148 ASSERT(szc > 0); 3149 3150 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3151 3152 for (i = 0; i < totnpgs; i++) { 3153 pp = ppa[i]; 3154 ASSERT(PAGE_SHARED(pp)); 3155 ASSERT(!PP_ISFREE(pp)); 3156 pfn = page_pptonum(pp); 3157 if (i == 0) { 3158 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3159 contig = 0; 3160 } else { 3161 first_pfn = pfn; 3162 } 3163 } else if (contig && pfn != first_pfn + i) { 3164 contig = 0; 3165 } 3166 if (pp->p_szc == 0) { 3167 if (root) { 3168 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3169 return (0); 3170 } 3171 } else if (!root) { 3172 if ((curszc = pp->p_szc) >= szc) { 3173 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3174 return (0); 3175 } 3176 if (curszc == 0) { 3177 /* 3178 * p_szc changed means we don't have all pages 3179 * locked. return failure. 3180 */ 3181 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3182 return (0); 3183 } 3184 curnpgs = page_get_pagecnt(curszc); 3185 if (!IS_P2ALIGNED(pfn, curnpgs) || 3186 !IS_P2ALIGNED(i, curnpgs)) { 3187 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3188 return (0); 3189 } 3190 root = 1; 3191 } else { 3192 ASSERT(i > 0); 3193 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3194 if (pp->p_szc != curszc) { 3195 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3196 return (0); 3197 } 3198 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3199 panic("segvn_full_szcpages: " 3200 "large page not physically contiguous"); 3201 } 3202 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3203 root = 0; 3204 } 3205 } 3206 } 3207 3208 for (i = 0; i < totnpgs; i++) { 3209 ASSERT(ppa[i]->p_szc < szc); 3210 if (!page_tryupgrade(ppa[i])) { 3211 for (j = 0; j < i; j++) { 3212 page_downgrade(ppa[j]); 3213 } 3214 *pszc = ppa[i]->p_szc; 3215 *upgrdfail = 1; 3216 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3217 return (0); 3218 } 3219 } 3220 3221 /* 3222 * When a page is put a free cachelist its szc is set to 0. if file 3223 * system reclaimed pages from cachelist targ pages will be physically 3224 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3225 * pages without any relocations. 3226 * To avoid any hat issues with previous small mappings 3227 * hat_pageunload() the target pages first. 3228 */ 3229 if (contig) { 3230 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3231 for (i = 0; i < totnpgs; i++) { 3232 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3233 } 3234 for (i = 0; i < totnpgs; i++) { 3235 ppa[i]->p_szc = szc; 3236 } 3237 for (i = 0; i < totnpgs; i++) { 3238 ASSERT(PAGE_EXCL(ppa[i])); 3239 page_downgrade(ppa[i]); 3240 } 3241 if (pszc != NULL) { 3242 *pszc = szc; 3243 } 3244 } 3245 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3246 return (1); 3247 } 3248 3249 /* 3250 * Create physically contiguous pages for [vp, off] - [vp, off + 3251 * page_size(szc)) range and for private segment return them in ppa array. 3252 * Pages are created either via IO or relocations. 3253 * 3254 * Return 1 on success and 0 on failure. 3255 * 3256 * If physically contiguous pages already exist for this range return 1 without 3257 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3258 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
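 *
 * A rough sketch of the calling pattern used by
 * segvn_fault_vnodepages():
 *
 *	ppa[0] = NULL;
 *	if (segvn_fill_vp_pages(svd, vp, off, szc, ppa, &pplist,
 *	    &pszc, &downsize) && ppa[0] != NULL)
 *		use the large page already sitting in ppa[];
 *	else
 *		fall back to VOP_GETPAGE(), retrying with a smaller
 *		page size when downsize was set.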
3259 */ 3260 3261 static int 3262 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3263 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3264 int *downsize) 3265 3266 { 3267 page_t *pplist = *ppplist; 3268 size_t pgsz = page_get_pagesize(szc); 3269 pgcnt_t pages = btop(pgsz); 3270 ulong_t start_off = off; 3271 u_offset_t eoff = off + pgsz; 3272 spgcnt_t nreloc; 3273 u_offset_t io_off = off; 3274 size_t io_len; 3275 page_t *io_pplist = NULL; 3276 page_t *done_pplist = NULL; 3277 pgcnt_t pgidx = 0; 3278 page_t *pp; 3279 page_t *newpp; 3280 page_t *targpp; 3281 int io_err = 0; 3282 int i; 3283 pfn_t pfn; 3284 ulong_t ppages; 3285 page_t *targ_pplist = NULL; 3286 page_t *repl_pplist = NULL; 3287 page_t *tmp_pplist; 3288 int nios = 0; 3289 uint_t pszc; 3290 struct vattr va; 3291 3292 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3293 3294 ASSERT(szc != 0); 3295 ASSERT(pplist->p_szc == szc); 3296 3297 /* 3298 * downsize will be set to 1 only if we fail to lock pages. this will 3299 * allow subsequent faults to try to relocate the page again. If we 3300 * fail due to misalignment don't downsize and let the caller map the 3301 * whole region with small mappings to avoid more faults into the area 3302 * where we can't get large pages anyway. 3303 */ 3304 *downsize = 0; 3305 3306 while (off < eoff) { 3307 newpp = pplist; 3308 ASSERT(newpp != NULL); 3309 ASSERT(PAGE_EXCL(newpp)); 3310 ASSERT(!PP_ISFREE(newpp)); 3311 /* 3312 * we pass NULL for nrelocp to page_lookup_create() 3313 * so that it doesn't relocate. We relocate here 3314 * later only after we make sure we can lock all 3315 * pages in the range we handle and they are all 3316 * aligned. 3317 */ 3318 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3319 ASSERT(pp != NULL); 3320 ASSERT(!PP_ISFREE(pp)); 3321 ASSERT(pp->p_vnode == vp); 3322 ASSERT(pp->p_offset == off); 3323 if (pp == newpp) { 3324 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3325 page_sub(&pplist, pp); 3326 ASSERT(PAGE_EXCL(pp)); 3327 ASSERT(page_iolock_assert(pp)); 3328 page_list_concat(&io_pplist, &pp); 3329 off += PAGESIZE; 3330 continue; 3331 } 3332 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3333 pfn = page_pptonum(pp); 3334 pszc = pp->p_szc; 3335 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3336 IS_P2ALIGNED(pfn, pages)) { 3337 ASSERT(repl_pplist == NULL); 3338 ASSERT(done_pplist == NULL); 3339 ASSERT(pplist == *ppplist); 3340 page_unlock(pp); 3341 page_free_replacement_page(pplist); 3342 page_create_putback(pages); 3343 *ppplist = NULL; 3344 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3345 return (1); 3346 } 3347 if (pszc >= szc) { 3348 page_unlock(pp); 3349 segvn_faultvnmpss_align_err1++; 3350 goto out; 3351 } 3352 ppages = page_get_pagecnt(pszc); 3353 if (!IS_P2ALIGNED(pfn, ppages)) { 3354 ASSERT(pszc > 0); 3355 /* 3356 * sizing down to pszc won't help. 3357 */ 3358 page_unlock(pp); 3359 segvn_faultvnmpss_align_err2++; 3360 goto out; 3361 } 3362 pfn = page_pptonum(newpp); 3363 if (!IS_P2ALIGNED(pfn, ppages)) { 3364 ASSERT(pszc > 0); 3365 /* 3366 * sizing down to pszc won't help. 
3367 */ 3368 page_unlock(pp); 3369 segvn_faultvnmpss_align_err3++; 3370 goto out; 3371 } 3372 if (!PAGE_EXCL(pp)) { 3373 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3374 page_unlock(pp); 3375 *downsize = 1; 3376 *ret_pszc = pp->p_szc; 3377 goto out; 3378 } 3379 targpp = pp; 3380 if (io_pplist != NULL) { 3381 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3382 io_len = off - io_off; 3383 /* 3384 * Some file systems like NFS don't check EOF 3385 * conditions in VOP_PAGEIO(). Check it here 3386 * now that pages are locked SE_EXCL. Any file 3387 * truncation will wait until the pages are 3388 * unlocked so no need to worry that file will 3389 * be truncated after we check its size here. 3390 * XXX fix NFS to remove this check. 3391 */ 3392 va.va_mask = AT_SIZE; 3393 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3394 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3395 page_unlock(targpp); 3396 goto out; 3397 } 3398 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3399 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3400 *downsize = 1; 3401 *ret_pszc = 0; 3402 page_unlock(targpp); 3403 goto out; 3404 } 3405 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3406 B_READ, svd->cred, NULL); 3407 if (io_err) { 3408 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3409 page_unlock(targpp); 3410 if (io_err == EDEADLK) { 3411 segvn_vmpss_pageio_deadlk_err++; 3412 } 3413 goto out; 3414 } 3415 nios++; 3416 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3417 while (io_pplist != NULL) { 3418 pp = io_pplist; 3419 page_sub(&io_pplist, pp); 3420 ASSERT(page_iolock_assert(pp)); 3421 page_io_unlock(pp); 3422 pgidx = (pp->p_offset - start_off) >> 3423 PAGESHIFT; 3424 ASSERT(pgidx < pages); 3425 ppa[pgidx] = pp; 3426 page_list_concat(&done_pplist, &pp); 3427 } 3428 } 3429 pp = targpp; 3430 ASSERT(PAGE_EXCL(pp)); 3431 ASSERT(pp->p_szc <= pszc); 3432 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3433 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3434 page_unlock(pp); 3435 *downsize = 1; 3436 *ret_pszc = pp->p_szc; 3437 goto out; 3438 } 3439 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3440 /* 3441 * page szc chould have changed before the entire group was 3442 * locked. reread page szc. 
3443 */ 3444 pszc = pp->p_szc; 3445 ppages = page_get_pagecnt(pszc); 3446 3447 /* link just the roots */ 3448 page_list_concat(&targ_pplist, &pp); 3449 page_sub(&pplist, newpp); 3450 page_list_concat(&repl_pplist, &newpp); 3451 off += PAGESIZE; 3452 while (--ppages != 0) { 3453 newpp = pplist; 3454 page_sub(&pplist, newpp); 3455 off += PAGESIZE; 3456 } 3457 io_off = off; 3458 } 3459 if (io_pplist != NULL) { 3460 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3461 io_len = eoff - io_off; 3462 va.va_mask = AT_SIZE; 3463 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3465 goto out; 3466 } 3467 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3468 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3469 *downsize = 1; 3470 *ret_pszc = 0; 3471 goto out; 3472 } 3473 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3474 B_READ, svd->cred, NULL); 3475 if (io_err) { 3476 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3477 if (io_err == EDEADLK) { 3478 segvn_vmpss_pageio_deadlk_err++; 3479 } 3480 goto out; 3481 } 3482 nios++; 3483 while (io_pplist != NULL) { 3484 pp = io_pplist; 3485 page_sub(&io_pplist, pp); 3486 ASSERT(page_iolock_assert(pp)); 3487 page_io_unlock(pp); 3488 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3489 ASSERT(pgidx < pages); 3490 ppa[pgidx] = pp; 3491 } 3492 } 3493 /* 3494 * we're now bound to succeed or panic. 3495 * remove pages from done_pplist. it's not needed anymore. 3496 */ 3497 while (done_pplist != NULL) { 3498 pp = done_pplist; 3499 page_sub(&done_pplist, pp); 3500 } 3501 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3502 ASSERT(pplist == NULL); 3503 *ppplist = NULL; 3504 while (targ_pplist != NULL) { 3505 int ret; 3506 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3507 ASSERT(repl_pplist); 3508 pp = targ_pplist; 3509 page_sub(&targ_pplist, pp); 3510 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3511 newpp = repl_pplist; 3512 page_sub(&repl_pplist, newpp); 3513 #ifdef DEBUG 3514 pfn = page_pptonum(pp); 3515 pszc = pp->p_szc; 3516 ppages = page_get_pagecnt(pszc); 3517 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3518 pfn = page_pptonum(newpp); 3519 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3520 ASSERT(P2PHASE(pfn, pages) == pgidx); 3521 #endif 3522 nreloc = 0; 3523 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3524 if (ret != 0 || nreloc == 0) { 3525 panic("segvn_fill_vp_pages: " 3526 "page_relocate failed"); 3527 } 3528 pp = newpp; 3529 while (nreloc-- != 0) { 3530 ASSERT(PAGE_EXCL(pp)); 3531 ASSERT(pp->p_vnode == vp); 3532 ASSERT(pgidx == 3533 ((pp->p_offset - start_off) >> PAGESHIFT)); 3534 ppa[pgidx++] = pp; 3535 pp++; 3536 } 3537 } 3538 3539 if (svd->type == MAP_PRIVATE) { 3540 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3541 for (i = 0; i < pages; i++) { 3542 ASSERT(ppa[i] != NULL); 3543 ASSERT(PAGE_EXCL(ppa[i])); 3544 ASSERT(ppa[i]->p_vnode == vp); 3545 ASSERT(ppa[i]->p_offset == 3546 start_off + (i << PAGESHIFT)); 3547 page_downgrade(ppa[i]); 3548 } 3549 ppa[pages] = NULL; 3550 } else { 3551 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3552 /* 3553 * the caller will still call VOP_GETPAGE() for shared segments 3554 * to check FS write permissions. For private segments we map 3555 * file read only anyway. so no VOP_GETPAGE is needed. 
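 * (That is also why ppa[0] is reset to NULL in the shared case below:
 * it tells the caller that the array was not filled and that
 * VOP_GETPAGE() must still be used to obtain the pages.)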
3556 */ 3557 for (i = 0; i < pages; i++) { 3558 ASSERT(ppa[i] != NULL); 3559 ASSERT(PAGE_EXCL(ppa[i])); 3560 ASSERT(ppa[i]->p_vnode == vp); 3561 ASSERT(ppa[i]->p_offset == 3562 start_off + (i << PAGESHIFT)); 3563 page_unlock(ppa[i]); 3564 } 3565 ppa[0] = NULL; 3566 } 3567 3568 return (1); 3569 out: 3570 /* 3571 * Do the cleanup. Unlock target pages we didn't relocate. They are 3572 * linked on targ_pplist by root pages. reassemble unused replacement 3573 * and io pages back to pplist. 3574 */ 3575 if (io_pplist != NULL) { 3576 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3577 pp = io_pplist; 3578 do { 3579 ASSERT(pp->p_vnode == vp); 3580 ASSERT(pp->p_offset == io_off); 3581 ASSERT(page_iolock_assert(pp)); 3582 page_io_unlock(pp); 3583 page_hashout(pp, NULL); 3584 io_off += PAGESIZE; 3585 } while ((pp = pp->p_next) != io_pplist); 3586 page_list_concat(&io_pplist, &pplist); 3587 pplist = io_pplist; 3588 } 3589 tmp_pplist = NULL; 3590 while (targ_pplist != NULL) { 3591 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3592 pp = targ_pplist; 3593 ASSERT(PAGE_EXCL(pp)); 3594 page_sub(&targ_pplist, pp); 3595 3596 pszc = pp->p_szc; 3597 ppages = page_get_pagecnt(pszc); 3598 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3599 3600 if (pszc != 0) { 3601 group_page_unlock(pp); 3602 } 3603 page_unlock(pp); 3604 3605 pp = repl_pplist; 3606 ASSERT(pp != NULL); 3607 ASSERT(PAGE_EXCL(pp)); 3608 ASSERT(pp->p_szc == szc); 3609 page_sub(&repl_pplist, pp); 3610 3611 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3612 3613 /* relink replacement page */ 3614 page_list_concat(&tmp_pplist, &pp); 3615 while (--ppages != 0) { 3616 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3617 pp++; 3618 ASSERT(PAGE_EXCL(pp)); 3619 ASSERT(pp->p_szc == szc); 3620 page_list_concat(&tmp_pplist, &pp); 3621 } 3622 } 3623 if (tmp_pplist != NULL) { 3624 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3625 page_list_concat(&tmp_pplist, &pplist); 3626 pplist = tmp_pplist; 3627 } 3628 /* 3629 * at this point all pages are either on done_pplist or 3630 * pplist. They can't be all on done_pplist otherwise 3631 * we'd've been done. 3632 */ 3633 ASSERT(pplist != NULL); 3634 if (nios != 0) { 3635 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3636 pp = pplist; 3637 do { 3638 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3639 ASSERT(pp->p_szc == szc); 3640 ASSERT(PAGE_EXCL(pp)); 3641 ASSERT(pp->p_vnode != vp); 3642 pp->p_szc = 0; 3643 } while ((pp = pp->p_next) != pplist); 3644 3645 pp = done_pplist; 3646 do { 3647 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3648 ASSERT(pp->p_szc == szc); 3649 ASSERT(PAGE_EXCL(pp)); 3650 ASSERT(pp->p_vnode == vp); 3651 pp->p_szc = 0; 3652 } while ((pp = pp->p_next) != done_pplist); 3653 3654 while (pplist != NULL) { 3655 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3656 pp = pplist; 3657 page_sub(&pplist, pp); 3658 page_free(pp, 0); 3659 } 3660 3661 while (done_pplist != NULL) { 3662 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3663 pp = done_pplist; 3664 page_sub(&done_pplist, pp); 3665 page_unlock(pp); 3666 } 3667 *ppplist = NULL; 3668 return (0); 3669 } 3670 ASSERT(pplist == *ppplist); 3671 if (io_err) { 3672 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3673 /* 3674 * don't downsize on io error. 3675 * see if vop_getpage succeeds. 3676 * pplist may still be used in this case 3677 * for relocations. 
3678 */ 3679 return (0); 3680 } 3681 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3682 page_free_replacement_page(pplist); 3683 page_create_putback(pages); 3684 *ppplist = NULL; 3685 return (0); 3686 } 3687 3688 int segvn_anypgsz = 0; 3689 3690 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3691 if ((type) == F_SOFTLOCK) { \ 3692 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3693 -(pages)); \ 3694 } 3695 3696 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3697 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3698 if ((rw) == S_WRITE) { \ 3699 for (i = 0; i < (pages); i++) { \ 3700 ASSERT((ppa)[i]->p_vnode == \ 3701 (ppa)[0]->p_vnode); \ 3702 hat_setmod((ppa)[i]); \ 3703 } \ 3704 } else if ((rw) != S_OTHER && \ 3705 ((prot) & (vpprot) & PROT_WRITE)) { \ 3706 for (i = 0; i < (pages); i++) { \ 3707 ASSERT((ppa)[i]->p_vnode == \ 3708 (ppa)[0]->p_vnode); \ 3709 if (!hat_ismod((ppa)[i])) { \ 3710 prot &= ~PROT_WRITE; \ 3711 break; \ 3712 } \ 3713 } \ 3714 } \ 3715 } 3716 3717 #ifdef VM_STATS 3718 3719 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3720 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3721 3722 #else /* VM_STATS */ 3723 3724 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3725 3726 #endif 3727 3728 static faultcode_t 3729 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3730 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3731 caddr_t eaddr, int brkcow) 3732 { 3733 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3734 struct anon_map *amp = svd->amp; 3735 uchar_t segtype = svd->type; 3736 uint_t szc = seg->s_szc; 3737 size_t pgsz = page_get_pagesize(szc); 3738 size_t maxpgsz = pgsz; 3739 pgcnt_t pages = btop(pgsz); 3740 pgcnt_t maxpages = pages; 3741 size_t ppasize = (pages + 1) * sizeof (page_t *); 3742 caddr_t a = lpgaddr; 3743 caddr_t maxlpgeaddr = lpgeaddr; 3744 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3745 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3746 struct vpage *vpage = (svd->vpage != NULL) ? 3747 &svd->vpage[seg_page(seg, a)] : NULL; 3748 vnode_t *vp = svd->vp; 3749 page_t **ppa; 3750 uint_t pszc; 3751 size_t ppgsz; 3752 pgcnt_t ppages; 3753 faultcode_t err = 0; 3754 int ierr; 3755 int vop_size_err = 0; 3756 uint_t protchk, prot, vpprot; 3757 ulong_t i; 3758 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3759 anon_sync_obj_t an_cookie; 3760 enum seg_rw arw; 3761 int alloc_failed = 0; 3762 int adjszc_chk; 3763 struct vattr va; 3764 int xhat = 0; 3765 page_t *pplist; 3766 pfn_t pfn; 3767 int physcontig; 3768 int upgrdfail; 3769 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3770 int tron = (svd->tr_state == SEGVN_TR_ON); 3771 3772 ASSERT(szc != 0); 3773 ASSERT(vp != NULL); 3774 ASSERT(brkcow == 0 || amp != NULL); 3775 ASSERT(tron == 0 || amp != NULL); 3776 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3777 ASSERT(!(svd->flags & MAP_NORESERVE)); 3778 ASSERT(type != F_SOFTUNLOCK); 3779 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3780 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3781 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3782 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3783 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3784 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3785 3786 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3787 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3788 3789 if (svd->flags & MAP_TEXT) { 3790 hat_flag |= HAT_LOAD_TEXT; 3791 } 3792 3793 if (svd->pageprot) { 3794 switch (rw) { 3795 case S_READ: 3796 protchk = PROT_READ; 3797 break; 3798 case S_WRITE: 3799 protchk = PROT_WRITE; 3800 break; 3801 case S_EXEC: 3802 protchk = PROT_EXEC; 3803 break; 3804 case S_OTHER: 3805 default: 3806 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3807 break; 3808 } 3809 } else { 3810 prot = svd->prot; 3811 /* caller has already done segment level protection check. */ 3812 } 3813 3814 if (seg->s_as->a_hat != hat) { 3815 xhat = 1; 3816 } 3817 3818 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3819 SEGVN_VMSTAT_FLTVNPAGES(2); 3820 arw = S_READ; 3821 } else { 3822 arw = rw; 3823 } 3824 3825 ppa = kmem_alloc(ppasize, KM_SLEEP); 3826 3827 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3828 3829 for (;;) { 3830 adjszc_chk = 0; 3831 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3832 if (adjszc_chk) { 3833 while (szc < seg->s_szc) { 3834 uintptr_t e; 3835 uint_t tszc; 3836 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3837 seg->s_szc; 3838 ppgsz = page_get_pagesize(tszc); 3839 if (!IS_P2ALIGNED(a, ppgsz) || 3840 ((alloc_failed >> tszc) & 0x1)) { 3841 break; 3842 } 3843 SEGVN_VMSTAT_FLTVNPAGES(4); 3844 szc = tszc; 3845 pgsz = ppgsz; 3846 pages = btop(pgsz); 3847 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3848 lpgeaddr = (caddr_t)e; 3849 } 3850 } 3851 3852 again: 3853 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3854 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3855 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 3856 anon_array_enter(amp, aindx, &an_cookie); 3857 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3858 SEGVN_VMSTAT_FLTVNPAGES(5); 3859 ASSERT(anon_pages(amp->ahp, aindx, 3860 maxpages) == maxpages); 3861 anon_array_exit(&an_cookie); 3862 ANON_LOCK_EXIT(&amp->a_rwlock); 3863 err = segvn_fault_anonpages(hat, seg, 3864 a, a + maxpgsz, type, rw, 3865 MAX(a, addr), 3866 MIN(a + maxpgsz, eaddr), brkcow); 3867 if (err != 0) { 3868 SEGVN_VMSTAT_FLTVNPAGES(6); 3869 goto out; 3870 } 3871 if (szc < seg->s_szc) { 3872 szc = seg->s_szc; 3873 pgsz = maxpgsz; 3874 pages = maxpages; 3875 lpgeaddr = maxlpgeaddr; 3876 } 3877 goto next; 3878 } else { 3879 ASSERT(anon_pages(amp->ahp, aindx, 3880 maxpages) == 0); 3881 SEGVN_VMSTAT_FLTVNPAGES(7); 3882 anon_array_exit(&an_cookie); 3883 ANON_LOCK_EXIT(&amp->a_rwlock); 3884 } 3885 } 3886 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3887 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3888 3889 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3890 ASSERT(vpage != NULL); 3891 prot = VPP_PROT(vpage); 3892 ASSERT(sameprot(seg, a, maxpgsz)); 3893 if ((prot & protchk) == 0) { 3894 SEGVN_VMSTAT_FLTVNPAGES(8); 3895 err = FC_PROT; 3896 goto out; 3897 } 3898 } 3899 if (type == F_SOFTLOCK) { 3900 atomic_add_long((ulong_t *)&svd->softlockcnt, 3901 pages); 3902 } 3903 3904 pplist = NULL; 3905 physcontig = 0; 3906 ppa[0] = NULL; 3907 if (!brkcow && !tron && szc && 3908 !page_exists_physcontig(vp, off, szc, 3909 segtype == MAP_PRIVATE ?
ppa : NULL)) {
3910 SEGVN_VMSTAT_FLTVNPAGES(9);
3911 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
3912 szc, 0, 0) && type != F_SOFTLOCK) {
3913 SEGVN_VMSTAT_FLTVNPAGES(10);
3914 pszc = 0;
3915 ierr = -1;
3916 alloc_failed |= (1 << szc);
3917 break;
3918 }
3919 if (pplist != NULL &&
3920 vp->v_mpssdata == SEGVN_PAGEIO) {
3921 int downsize;
3922 SEGVN_VMSTAT_FLTVNPAGES(11);
3923 physcontig = segvn_fill_vp_pages(svd,
3924 vp, off, szc, ppa, &pplist,
3925 &pszc, &downsize);
3926 ASSERT(!physcontig || pplist == NULL);
3927 if (!physcontig && downsize &&
3928 type != F_SOFTLOCK) {
3929 ASSERT(pplist == NULL);
3930 SEGVN_VMSTAT_FLTVNPAGES(12);
3931 ierr = -1;
3932 break;
3933 }
3934 ASSERT(!physcontig ||
3935 segtype == MAP_PRIVATE ||
3936 ppa[0] == NULL);
3937 if (physcontig && ppa[0] == NULL) {
3938 physcontig = 0;
3939 }
3940 }
3941 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
3942 SEGVN_VMSTAT_FLTVNPAGES(13);
3943 ASSERT(segtype == MAP_PRIVATE);
3944 physcontig = 1;
3945 }
3946
3947 if (!physcontig) {
3948 SEGVN_VMSTAT_FLTVNPAGES(14);
3949 ppa[0] = NULL;
3950 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
3951 &vpprot, ppa, pgsz, seg, a, arw,
3952 svd->cred, NULL);
3953 #ifdef DEBUG
3954 if (ierr == 0) {
3955 for (i = 0; i < pages; i++) {
3956 ASSERT(PAGE_LOCKED(ppa[i]));
3957 ASSERT(!PP_ISFREE(ppa[i]));
3958 ASSERT(ppa[i]->p_vnode == vp);
3959 ASSERT(ppa[i]->p_offset ==
3960 off + (i << PAGESHIFT));
3961 }
3962 }
3963 #endif /* DEBUG */
3964 if (segtype == MAP_PRIVATE) {
3965 SEGVN_VMSTAT_FLTVNPAGES(15);
3966 vpprot &= ~PROT_WRITE;
3967 }
3968 } else {
3969 ASSERT(segtype == MAP_PRIVATE);
3970 SEGVN_VMSTAT_FLTVNPAGES(16);
3971 vpprot = PROT_ALL & ~PROT_WRITE;
3972 ierr = 0;
3973 }
3974
3975 if (ierr != 0) {
3976 SEGVN_VMSTAT_FLTVNPAGES(17);
3977 if (pplist != NULL) {
3978 SEGVN_VMSTAT_FLTVNPAGES(18);
3979 page_free_replacement_page(pplist);
3980 page_create_putback(pages);
3981 }
3982 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
3983 if (a + pgsz <= eaddr) {
3984 SEGVN_VMSTAT_FLTVNPAGES(19);
3985 err = FC_MAKE_ERR(ierr);
3986 goto out;
3987 }
3988 va.va_mask = AT_SIZE;
3989 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
3990 SEGVN_VMSTAT_FLTVNPAGES(20);
3991 err = FC_MAKE_ERR(EIO);
3992 goto out;
3993 }
3994 if (btopr(va.va_size) >= btopr(off + pgsz)) {
3995 SEGVN_VMSTAT_FLTVNPAGES(21);
3996 err = FC_MAKE_ERR(ierr);
3997 goto out;
3998 }
3999 if (btopr(va.va_size) <
4000 btopr(off + (eaddr - a))) {
4001 SEGVN_VMSTAT_FLTVNPAGES(22);
4002 err = FC_MAKE_ERR(ierr);
4003 goto out;
4004 }
4005 if (brkcow || tron || type == F_SOFTLOCK) {
4006 /* can't reduce map area */
4007 SEGVN_VMSTAT_FLTVNPAGES(23);
4008 vop_size_err = 1;
4009 goto out;
4010 }
4011 SEGVN_VMSTAT_FLTVNPAGES(24);
4012 ASSERT(szc != 0);
4013 pszc = 0;
4014 ierr = -1;
4015 break;
4016 }
4017
4018 if (amp != NULL) {
4019 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
4020 anon_array_enter(amp, aindx, &an_cookie);
4021 }
4022 if (amp != NULL &&
4023 anon_get_ptr(amp->ahp, aindx) != NULL) {
4024 ulong_t taindx = P2ALIGN(aindx, maxpages);
4025
4026 SEGVN_VMSTAT_FLTVNPAGES(25);
4027 ASSERT(anon_pages(amp->ahp, taindx,
4028 maxpages) == maxpages);
4029 for (i = 0; i < pages; i++) {
4030 page_unlock(ppa[i]);
4031 }
4032 anon_array_exit(&an_cookie);
4033 ANON_LOCK_EXIT(&amp->a_rwlock);
4034 if (pplist != NULL) {
4035 page_free_replacement_page(pplist);
4036 page_create_putback(pages);
4037 }
4038 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4039 if (szc < seg->s_szc) {
4040 SEGVN_VMSTAT_FLTVNPAGES(26);
4041 /*
4042 * For private segments SOFTLOCK
* either always breaks cow (any rw
4044 * type except S_READ_NOCOW) or
4045 * address space is locked as writer
4046 * (S_READ_NOCOW case) and anon slots
4047 * can't show up on second check.
4048 * Therefore if we are here for
4049 * SOFTLOCK case it must be a cow
4050 * break but cow break never reduces
4051 * szc. text replication (tron) in
4052 * this case works as cow break.
4053 * Thus the assert below.
4054 */
4055 ASSERT(!brkcow && !tron &&
4056 type != F_SOFTLOCK);
4057 pszc = seg->s_szc;
4058 ierr = -2;
4059 break;
4060 }
4061 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4062 goto again;
4063 }
4064 #ifdef DEBUG
4065 if (amp != NULL) {
4066 ulong_t taindx = P2ALIGN(aindx, maxpages);
4067 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4068 }
4069 #endif /* DEBUG */
4070
4071 if (brkcow || tron) {
4072 ASSERT(amp != NULL);
4073 ASSERT(pplist == NULL);
4074 ASSERT(szc == seg->s_szc);
4075 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4076 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4077 SEGVN_VMSTAT_FLTVNPAGES(27);
4078 ierr = anon_map_privatepages(amp, aindx, szc,
4079 seg, a, prot, ppa, vpage, segvn_anypgsz,
4080 tron ? PG_LOCAL : 0, svd->cred);
4081 if (ierr != 0) {
4082 SEGVN_VMSTAT_FLTVNPAGES(28);
4083 anon_array_exit(&an_cookie);
4084 ANON_LOCK_EXIT(&amp->a_rwlock);
4085 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4086 err = FC_MAKE_ERR(ierr);
4087 goto out;
4088 }
4089
4090 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4091 /*
4092 * p_szc can't be changed for locked
4093 * swapfs pages.
4094 */
4095 ASSERT(svd->rcookie ==
4096 HAT_INVALID_REGION_COOKIE);
4097 hat_memload_array(hat, a, pgsz, ppa, prot,
4098 hat_flag);
4099
4100 if (!(hat_flag & HAT_LOAD_LOCK)) {
4101 SEGVN_VMSTAT_FLTVNPAGES(29);
4102 for (i = 0; i < pages; i++) {
4103 page_unlock(ppa[i]);
4104 }
4105 }
4106 anon_array_exit(&an_cookie);
4107 ANON_LOCK_EXIT(&amp->a_rwlock);
4108 goto next;
4109 }
4110
4111 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4112 (!svd->pageprot && svd->prot == (prot & vpprot)));
4113
4114 pfn = page_pptonum(ppa[0]);
4115 /*
4116 * hat_page_demote() needs an SE_EXCL lock on one of
4117 * constituent page_t's and it decreases root's p_szc
4118 * last. This means if root's p_szc is equal to szc and
4119 * all its constituent pages are locked
4120 * hat_page_demote() that could have changed p_szc to
4121 * szc is already done and no new hat_page_demote()
4122 * can start for this large page.
4123 */
4124
4125 /*
4126 * we need to make sure same mapping size is used for
4127 * the same address range if there's a possibility the
4128 * address is already mapped because hat layer panics
4129 * when translation is loaded for the range already
4130 * mapped with a different page size. We achieve it
4131 * by always using largest page size possible subject
4132 * to the constraints of page size, segment page size
4133 * and page alignment. Since mappings are invalidated
4134 * when those constraints change and make it
4135 * impossible to use previously used mapping size no
4136 * mapping size conflicts should happen.
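 *
 * (Illustrative sketch only, not part of the original source:
 * when segvn_anypgsz_vnode is nonzero, the adjszc_chk logic
 * above effectively picks the largest usable size one level
 * at a time, roughly
 *
 *	while (szc < seg->s_szc &&
 *	    IS_P2ALIGNED(a, page_get_pagesize(szc + 1)) &&
 *	    !((alloc_failed >> (szc + 1)) & 0x1))
 *		szc++;
 *
 * stopping as soon as alignment fails or that size has already
 * failed to allocate.)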
4137 */ 4138 4139 chkszc: 4140 if ((pszc = ppa[0]->p_szc) == szc && 4141 IS_P2ALIGNED(pfn, pages)) { 4142 4143 SEGVN_VMSTAT_FLTVNPAGES(30); 4144 #ifdef DEBUG 4145 for (i = 0; i < pages; i++) { 4146 ASSERT(PAGE_LOCKED(ppa[i])); 4147 ASSERT(!PP_ISFREE(ppa[i])); 4148 ASSERT(page_pptonum(ppa[i]) == 4149 pfn + i); 4150 ASSERT(ppa[i]->p_szc == szc); 4151 ASSERT(ppa[i]->p_vnode == vp); 4152 ASSERT(ppa[i]->p_offset == 4153 off + (i << PAGESHIFT)); 4154 } 4155 #endif /* DEBUG */ 4156 /* 4157 * All pages are of szc we need and they are 4158 * all locked so they can't change szc. load 4159 * translations. 4160 * 4161 * if page got promoted since last check 4162 * we don't need pplist. 4163 */ 4164 if (pplist != NULL) { 4165 page_free_replacement_page(pplist); 4166 page_create_putback(pages); 4167 } 4168 if (PP_ISMIGRATE(ppa[0])) { 4169 page_migrate(seg, a, ppa, pages); 4170 } 4171 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4172 prot, vpprot); 4173 if (!xhat) { 4174 hat_memload_array_region(hat, a, pgsz, 4175 ppa, prot & vpprot, hat_flag, 4176 svd->rcookie); 4177 } else { 4178 /* 4179 * avoid large xhat mappings to FS 4180 * pages so that hat_page_demote() 4181 * doesn't need to check for xhat 4182 * large mappings. 4183 * Don't use regions with xhats. 4184 */ 4185 for (i = 0; i < pages; i++) { 4186 hat_memload(hat, 4187 a + (i << PAGESHIFT), 4188 ppa[i], prot & vpprot, 4189 hat_flag); 4190 } 4191 } 4192 4193 if (!(hat_flag & HAT_LOAD_LOCK)) { 4194 for (i = 0; i < pages; i++) { 4195 page_unlock(ppa[i]); 4196 } 4197 } 4198 if (amp != NULL) { 4199 anon_array_exit(&an_cookie); 4200 ANON_LOCK_EXIT(&->a_rwlock); 4201 } 4202 goto next; 4203 } 4204 4205 /* 4206 * See if upsize is possible. 4207 */ 4208 if (pszc > szc && szc < seg->s_szc && 4209 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4210 pgcnt_t aphase; 4211 uint_t pszc1 = MIN(pszc, seg->s_szc); 4212 ppgsz = page_get_pagesize(pszc1); 4213 ppages = btop(ppgsz); 4214 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4215 4216 ASSERT(type != F_SOFTLOCK); 4217 4218 SEGVN_VMSTAT_FLTVNPAGES(31); 4219 if (aphase != P2PHASE(pfn, ppages)) { 4220 segvn_faultvnmpss_align_err4++; 4221 } else { 4222 SEGVN_VMSTAT_FLTVNPAGES(32); 4223 if (pplist != NULL) { 4224 page_t *pl = pplist; 4225 page_free_replacement_page(pl); 4226 page_create_putback(pages); 4227 } 4228 for (i = 0; i < pages; i++) { 4229 page_unlock(ppa[i]); 4230 } 4231 if (amp != NULL) { 4232 anon_array_exit(&an_cookie); 4233 ANON_LOCK_EXIT(&->a_rwlock); 4234 } 4235 pszc = pszc1; 4236 ierr = -2; 4237 break; 4238 } 4239 } 4240 4241 /* 4242 * check if we should use smallest mapping size. 4243 */ 4244 upgrdfail = 0; 4245 if (szc == 0 || xhat || 4246 (pszc >= szc && 4247 !IS_P2ALIGNED(pfn, pages)) || 4248 (pszc < szc && 4249 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4250 &pszc))) { 4251 4252 if (upgrdfail && type != F_SOFTLOCK) { 4253 /* 4254 * segvn_full_szcpages failed to lock 4255 * all pages EXCL. Size down. 
4256 */ 4257 ASSERT(pszc < szc); 4258 4259 SEGVN_VMSTAT_FLTVNPAGES(33); 4260 4261 if (pplist != NULL) { 4262 page_t *pl = pplist; 4263 page_free_replacement_page(pl); 4264 page_create_putback(pages); 4265 } 4266 4267 for (i = 0; i < pages; i++) { 4268 page_unlock(ppa[i]); 4269 } 4270 if (amp != NULL) { 4271 anon_array_exit(&an_cookie); 4272 ANON_LOCK_EXIT(&->a_rwlock); 4273 } 4274 ierr = -1; 4275 break; 4276 } 4277 if (szc != 0 && !xhat && !upgrdfail) { 4278 segvn_faultvnmpss_align_err5++; 4279 } 4280 SEGVN_VMSTAT_FLTVNPAGES(34); 4281 if (pplist != NULL) { 4282 page_free_replacement_page(pplist); 4283 page_create_putback(pages); 4284 } 4285 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4286 prot, vpprot); 4287 if (upgrdfail && segvn_anypgsz_vnode) { 4288 /* SOFTLOCK case */ 4289 hat_memload_array_region(hat, a, pgsz, 4290 ppa, prot & vpprot, hat_flag, 4291 svd->rcookie); 4292 } else { 4293 for (i = 0; i < pages; i++) { 4294 hat_memload_region(hat, 4295 a + (i << PAGESHIFT), 4296 ppa[i], prot & vpprot, 4297 hat_flag, svd->rcookie); 4298 } 4299 } 4300 if (!(hat_flag & HAT_LOAD_LOCK)) { 4301 for (i = 0; i < pages; i++) { 4302 page_unlock(ppa[i]); 4303 } 4304 } 4305 if (amp != NULL) { 4306 anon_array_exit(&an_cookie); 4307 ANON_LOCK_EXIT(&->a_rwlock); 4308 } 4309 goto next; 4310 } 4311 4312 if (pszc == szc) { 4313 /* 4314 * segvn_full_szcpages() upgraded pages szc. 4315 */ 4316 ASSERT(pszc == ppa[0]->p_szc); 4317 ASSERT(IS_P2ALIGNED(pfn, pages)); 4318 goto chkszc; 4319 } 4320 4321 if (pszc > szc) { 4322 kmutex_t *szcmtx; 4323 SEGVN_VMSTAT_FLTVNPAGES(35); 4324 /* 4325 * p_szc of ppa[0] can change since we haven't 4326 * locked all constituent pages. Call 4327 * page_lock_szc() to prevent szc changes. 4328 * This should be a rare case that happens when 4329 * multiple segments use a different page size 4330 * to map the same file offsets. 4331 */ 4332 szcmtx = page_szc_lock(ppa[0]); 4333 pszc = ppa[0]->p_szc; 4334 ASSERT(szcmtx != NULL || pszc == 0); 4335 ASSERT(ppa[0]->p_szc <= pszc); 4336 if (pszc <= szc) { 4337 SEGVN_VMSTAT_FLTVNPAGES(36); 4338 if (szcmtx != NULL) { 4339 mutex_exit(szcmtx); 4340 } 4341 goto chkszc; 4342 } 4343 if (pplist != NULL) { 4344 /* 4345 * page got promoted since last check. 4346 * we don't need preaalocated large 4347 * page. 4348 */ 4349 SEGVN_VMSTAT_FLTVNPAGES(37); 4350 page_free_replacement_page(pplist); 4351 page_create_putback(pages); 4352 } 4353 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4354 prot, vpprot); 4355 hat_memload_array_region(hat, a, pgsz, ppa, 4356 prot & vpprot, hat_flag, svd->rcookie); 4357 mutex_exit(szcmtx); 4358 if (!(hat_flag & HAT_LOAD_LOCK)) { 4359 for (i = 0; i < pages; i++) { 4360 page_unlock(ppa[i]); 4361 } 4362 } 4363 if (amp != NULL) { 4364 anon_array_exit(&an_cookie); 4365 ANON_LOCK_EXIT(&->a_rwlock); 4366 } 4367 goto next; 4368 } 4369 4370 /* 4371 * if page got demoted since last check 4372 * we could have not allocated larger page. 4373 * allocate now. 
4374 */ 4375 if (pplist == NULL && 4376 page_alloc_pages(vp, seg, a, &pplist, NULL, 4377 szc, 0, 0) && type != F_SOFTLOCK) { 4378 SEGVN_VMSTAT_FLTVNPAGES(38); 4379 for (i = 0; i < pages; i++) { 4380 page_unlock(ppa[i]); 4381 } 4382 if (amp != NULL) { 4383 anon_array_exit(&an_cookie); 4384 ANON_LOCK_EXIT(&->a_rwlock); 4385 } 4386 ierr = -1; 4387 alloc_failed |= (1 << szc); 4388 break; 4389 } 4390 4391 SEGVN_VMSTAT_FLTVNPAGES(39); 4392 4393 if (pplist != NULL) { 4394 segvn_relocate_pages(ppa, pplist); 4395 #ifdef DEBUG 4396 } else { 4397 ASSERT(type == F_SOFTLOCK); 4398 SEGVN_VMSTAT_FLTVNPAGES(40); 4399 #endif /* DEBUG */ 4400 } 4401 4402 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4403 4404 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4405 ASSERT(type == F_SOFTLOCK); 4406 for (i = 0; i < pages; i++) { 4407 ASSERT(ppa[i]->p_szc < szc); 4408 hat_memload_region(hat, 4409 a + (i << PAGESHIFT), 4410 ppa[i], prot & vpprot, hat_flag, 4411 svd->rcookie); 4412 } 4413 } else { 4414 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4415 hat_memload_array_region(hat, a, pgsz, ppa, 4416 prot & vpprot, hat_flag, svd->rcookie); 4417 } 4418 if (!(hat_flag & HAT_LOAD_LOCK)) { 4419 for (i = 0; i < pages; i++) { 4420 ASSERT(PAGE_SHARED(ppa[i])); 4421 page_unlock(ppa[i]); 4422 } 4423 } 4424 if (amp != NULL) { 4425 anon_array_exit(&an_cookie); 4426 ANON_LOCK_EXIT(&->a_rwlock); 4427 } 4428 4429 next: 4430 if (vpage != NULL) { 4431 vpage += pages; 4432 } 4433 adjszc_chk = 1; 4434 } 4435 if (a == lpgeaddr) 4436 break; 4437 ASSERT(a < lpgeaddr); 4438 4439 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4440 4441 /* 4442 * ierr == -1 means we failed to map with a large page. 4443 * (either due to allocation/relocation failures or 4444 * misalignment with other mappings to this file. 4445 * 4446 * ierr == -2 means some other thread allocated a large page 4447 * after we gave up tp map with a large page. retry with 4448 * larger mapping. 4449 */ 4450 ASSERT(ierr == -1 || ierr == -2); 4451 ASSERT(ierr == -2 || szc != 0); 4452 ASSERT(ierr == -1 || szc < seg->s_szc); 4453 if (ierr == -2) { 4454 SEGVN_VMSTAT_FLTVNPAGES(41); 4455 ASSERT(pszc > szc && pszc <= seg->s_szc); 4456 szc = pszc; 4457 } else if (segvn_anypgsz_vnode) { 4458 SEGVN_VMSTAT_FLTVNPAGES(42); 4459 szc--; 4460 } else { 4461 SEGVN_VMSTAT_FLTVNPAGES(43); 4462 ASSERT(pszc < szc); 4463 /* 4464 * other process created pszc large page. 4465 * but we still have to drop to 0 szc. 4466 */ 4467 szc = 0; 4468 } 4469 4470 pgsz = page_get_pagesize(szc); 4471 pages = btop(pgsz); 4472 if (ierr == -2) { 4473 /* 4474 * Size up case. Note lpgaddr may only be needed for 4475 * softlock case so we don't adjust it here. 4476 */ 4477 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4478 ASSERT(a >= lpgaddr); 4479 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4480 off = svd->offset + (uintptr_t)(a - seg->s_base); 4481 aindx = svd->anon_index + seg_page(seg, a); 4482 vpage = (svd->vpage != NULL) ? 4483 &svd->vpage[seg_page(seg, a)] : NULL; 4484 } else { 4485 /* 4486 * Size down case. Note lpgaddr may only be needed for 4487 * softlock case so we don't adjust it here. 4488 */ 4489 ASSERT(IS_P2ALIGNED(a, pgsz)); 4490 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4491 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4492 ASSERT(a < lpgeaddr); 4493 if (a < addr) { 4494 SEGVN_VMSTAT_FLTVNPAGES(44); 4495 /* 4496 * The beginning of the large page region can 4497 * be pulled to the right to make a smaller 4498 * region. We haven't yet faulted a single 4499 * page. 
4500 */ 4501 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4502 ASSERT(a >= lpgaddr); 4503 off = svd->offset + 4504 (uintptr_t)(a - seg->s_base); 4505 aindx = svd->anon_index + seg_page(seg, a); 4506 vpage = (svd->vpage != NULL) ? 4507 &svd->vpage[seg_page(seg, a)] : NULL; 4508 } 4509 } 4510 } 4511 out: 4512 kmem_free(ppa, ppasize); 4513 if (!err && !vop_size_err) { 4514 SEGVN_VMSTAT_FLTVNPAGES(45); 4515 return (0); 4516 } 4517 if (type == F_SOFTLOCK && a > lpgaddr) { 4518 SEGVN_VMSTAT_FLTVNPAGES(46); 4519 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4520 } 4521 if (!vop_size_err) { 4522 SEGVN_VMSTAT_FLTVNPAGES(47); 4523 return (err); 4524 } 4525 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4526 /* 4527 * Large page end is mapped beyond the end of file and it's a cow 4528 * fault (can be a text replication induced cow) or softlock so we can't 4529 * reduce the map area. For now just demote the segment. This should 4530 * really only happen if the end of the file changed after the mapping 4531 * was established since when large page segments are created we make 4532 * sure they don't extend beyond the end of the file. 4533 */ 4534 SEGVN_VMSTAT_FLTVNPAGES(48); 4535 4536 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4537 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4538 err = 0; 4539 if (seg->s_szc != 0) { 4540 segvn_fltvnpages_clrszc_cnt++; 4541 ASSERT(svd->softlockcnt == 0); 4542 err = segvn_clrszc(seg); 4543 if (err != 0) { 4544 segvn_fltvnpages_clrszc_err++; 4545 } 4546 } 4547 ASSERT(err || seg->s_szc == 0); 4548 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4549 /* segvn_fault will do its job as if szc had been zero to begin with */ 4550 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4551 } 4552 4553 /* 4554 * This routine will attempt to fault in one large page. 4555 * it will use smaller pages if that fails. 4556 * It should only be called for pure anonymous segments. 4557 */ 4558 static faultcode_t 4559 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4560 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4561 caddr_t eaddr, int brkcow) 4562 { 4563 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4564 struct anon_map *amp = svd->amp; 4565 uchar_t segtype = svd->type; 4566 uint_t szc = seg->s_szc; 4567 size_t pgsz = page_get_pagesize(szc); 4568 size_t maxpgsz = pgsz; 4569 pgcnt_t pages = btop(pgsz); 4570 uint_t ppaszc = szc; 4571 caddr_t a = lpgaddr; 4572 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4573 struct vpage *vpage = (svd->vpage != NULL) ? 4574 &svd->vpage[seg_page(seg, a)] : NULL; 4575 page_t **ppa; 4576 uint_t ppa_szc; 4577 faultcode_t err; 4578 int ierr; 4579 uint_t protchk, prot, vpprot; 4580 ulong_t i; 4581 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4582 anon_sync_obj_t cookie; 4583 int adjszc_chk; 4584 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4585 4586 ASSERT(szc != 0); 4587 ASSERT(amp != NULL); 4588 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4589 ASSERT(!(svd->flags & MAP_NORESERVE)); 4590 ASSERT(type != F_SOFTUNLOCK); 4591 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4592 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4593 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4594 4595 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4596 4597 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4598 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4599 4600 if (svd->flags & MAP_TEXT) { 4601 hat_flag |= HAT_LOAD_TEXT; 4602 } 4603 4604 if (svd->pageprot) { 4605 switch (rw) { 4606 case S_READ: 4607 protchk = PROT_READ; 4608 break; 4609 case S_WRITE: 4610 protchk = PROT_WRITE; 4611 break; 4612 case S_EXEC: 4613 protchk = PROT_EXEC; 4614 break; 4615 case S_OTHER: 4616 default: 4617 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4618 break; 4619 } 4620 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4621 } else { 4622 prot = svd->prot; 4623 /* caller has already done segment level protection check. */ 4624 } 4625 4626 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4627 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4628 for (;;) { 4629 adjszc_chk = 0; 4630 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4631 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4632 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4633 ASSERT(vpage != NULL); 4634 prot = VPP_PROT(vpage); 4635 ASSERT(sameprot(seg, a, maxpgsz)); 4636 if ((prot & protchk) == 0) { 4637 err = FC_PROT; 4638 goto error; 4639 } 4640 } 4641 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4642 pgsz < maxpgsz) { 4643 ASSERT(a > lpgaddr); 4644 szc = seg->s_szc; 4645 pgsz = maxpgsz; 4646 pages = btop(pgsz); 4647 ASSERT(IS_P2ALIGNED(aindx, pages)); 4648 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4649 pgsz); 4650 } 4651 if (type == F_SOFTLOCK) { 4652 atomic_add_long((ulong_t *)&svd->softlockcnt, 4653 pages); 4654 } 4655 anon_array_enter(amp, aindx, &cookie); 4656 ppa_szc = (uint_t)-1; 4657 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4658 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4659 segvn_anypgsz, pgflags, svd->cred); 4660 if (ierr != 0) { 4661 anon_array_exit(&cookie); 4662 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4663 if (type == F_SOFTLOCK) { 4664 atomic_add_long( 4665 (ulong_t *)&svd->softlockcnt, 4666 -pages); 4667 } 4668 if (ierr > 0) { 4669 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4670 err = FC_MAKE_ERR(ierr); 4671 goto error; 4672 } 4673 break; 4674 } 4675 4676 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4677 4678 ASSERT(segtype == MAP_SHARED || 4679 ppa[0]->p_szc <= szc); 4680 ASSERT(segtype == MAP_PRIVATE || 4681 ppa[0]->p_szc >= szc); 4682 4683 /* 4684 * Handle pages that have been marked for migration 4685 */ 4686 if (lgrp_optimizations()) 4687 page_migrate(seg, a, ppa, pages); 4688 4689 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4690 4691 if (segtype == MAP_SHARED) { 4692 vpprot |= PROT_WRITE; 4693 } 4694 4695 hat_memload_array(hat, a, pgsz, ppa, 4696 prot & vpprot, hat_flag); 4697 4698 if (hat_flag & HAT_LOAD_LOCK) { 4699 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4700 } else { 4701 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4702 for (i = 0; i < pages; i++) 4703 page_unlock(ppa[i]); 4704 } 4705 if (vpage != NULL) 4706 vpage += pages; 4707 4708 anon_array_exit(&cookie); 4709 adjszc_chk = 1; 4710 } 4711 if (a == lpgeaddr) 4712 break; 4713 ASSERT(a < lpgeaddr); 4714 /* 4715 * ierr == -1 means we failed to 
allocate a large page. 4716 * so do a size down operation. 4717 * 4718 * ierr == -2 means some other process that privately shares 4719 * pages with this process has allocated a larger page and we 4720 * need to retry with larger pages. So do a size up 4721 * operation. This relies on the fact that large pages are 4722 * never partially shared i.e. if we share any constituent 4723 * page of a large page with another process we must share the 4724 * entire large page. Note this cannot happen for SOFTLOCK 4725 * case, unless current address (a) is at the beginning of the 4726 * next page size boundary because the other process couldn't 4727 * have relocated locked pages. 4728 */ 4729 ASSERT(ierr == -1 || ierr == -2); 4730 4731 if (segvn_anypgsz) { 4732 ASSERT(ierr == -2 || szc != 0); 4733 ASSERT(ierr == -1 || szc < seg->s_szc); 4734 szc = (ierr == -1) ? szc - 1 : szc + 1; 4735 } else { 4736 /* 4737 * For non COW faults and segvn_anypgsz == 0 4738 * we need to be careful not to loop forever 4739 * if existing page is found with szc other 4740 * than 0 or seg->s_szc. This could be due 4741 * to page relocations on behalf of DR or 4742 * more likely large page creation. For this 4743 * case simply re-size to existing page's szc 4744 * if returned by anon_map_getpages(). 4745 */ 4746 if (ppa_szc == (uint_t)-1) { 4747 szc = (ierr == -1) ? 0 : seg->s_szc; 4748 } else { 4749 ASSERT(ppa_szc <= seg->s_szc); 4750 ASSERT(ierr == -2 || ppa_szc < szc); 4751 ASSERT(ierr == -1 || ppa_szc > szc); 4752 szc = ppa_szc; 4753 } 4754 } 4755 4756 pgsz = page_get_pagesize(szc); 4757 pages = btop(pgsz); 4758 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4759 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4760 if (type == F_SOFTLOCK) { 4761 /* 4762 * For softlocks we cannot reduce the fault area 4763 * (calculated based on the largest page size for this 4764 * segment) for size down and a is already next 4765 * page size aligned as assertted above for size 4766 * ups. Therefore just continue in case of softlock. 4767 */ 4768 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4769 continue; /* keep lint happy */ 4770 } else if (ierr == -2) { 4771 4772 /* 4773 * Size up case. Note lpgaddr may only be needed for 4774 * softlock case so we don't adjust it here. 4775 */ 4776 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4777 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4778 ASSERT(a >= lpgaddr); 4779 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4780 aindx = svd->anon_index + seg_page(seg, a); 4781 vpage = (svd->vpage != NULL) ? 4782 &svd->vpage[seg_page(seg, a)] : NULL; 4783 } else { 4784 /* 4785 * Size down case. Note lpgaddr may only be needed for 4786 * softlock case so we don't adjust it here. 4787 */ 4788 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4789 ASSERT(IS_P2ALIGNED(a, pgsz)); 4790 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4791 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4792 ASSERT(a < lpgeaddr); 4793 if (a < addr) { 4794 /* 4795 * The beginning of the large page region can 4796 * be pulled to the right to make a smaller 4797 * region. We haven't yet faulted a single 4798 * page. 4799 */ 4800 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4801 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4802 ASSERT(a >= lpgaddr); 4803 aindx = svd->anon_index + seg_page(seg, a); 4804 vpage = (svd->vpage != NULL) ? 
&svd->vpage[seg_page(seg, a)] : NULL;
4806 }
4807 }
4808 }
4809 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4810 ANON_LOCK_EXIT(&amp->a_rwlock);
4811 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4812 return (0);
4813 error:
4814 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4815 ANON_LOCK_EXIT(&amp->a_rwlock);
4816 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4817 if (type == F_SOFTLOCK && a > lpgaddr) {
4818 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4819 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4820 }
4821 return (err);
4822 }
4823
4824 int fltadvice = 1; /* set to free behind pages for sequential access */
4825
4826 /*
4827 * This routine is called via a machine specific fault handling routine.
4828 * It is also called by software routines wishing to lock or unlock
4829 * a range of addresses.
4830 *
4831 * Here is the basic algorithm:
4832 * If unlocking
4833 * Call segvn_softunlock
4834 * Return
4835 * endif
4836 * Checking and set up work
4837 * If we will need some non-anonymous pages
4838 * Call VOP_GETPAGE over the range of non-anonymous pages
4839 * endif
4840 * Loop over all addresses requested
4841 * Call segvn_faultpage passing in page list
4842 * to load up translations and handle anonymous pages
4843 * endloop
4844 * Load up translation to any additional pages in page list not
4845 * already handled that fit into this segment
4846 */
4847 static faultcode_t
4848 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4849 enum fault_type type, enum seg_rw rw)
4850 {
4851 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4852 page_t **plp, **ppp, *pp;
4853 u_offset_t off;
4854 caddr_t a;
4855 struct vpage *vpage;
4856 uint_t vpprot, prot;
4857 int err;
4858 page_t *pl[PVN_GETPAGE_NUM + 1];
4859 size_t plsz, pl_alloc_sz;
4860 size_t page;
4861 ulong_t anon_index;
4862 struct anon_map *amp;
4863 int dogetpage = 0;
4864 caddr_t lpgaddr, lpgeaddr;
4865 size_t pgsz;
4866 anon_sync_obj_t cookie;
4867 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4868
4869 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4870 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4871
4872 /*
4873 * First handle the easy stuff
4874 */
4875 if (type == F_SOFTUNLOCK) {
4876 if (rw == S_READ_NOCOW) {
4877 rw = S_READ;
4878 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4879 }
4880 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4881 pgsz = (seg->s_szc == 0) ?
PAGESIZE : 4882 page_get_pagesize(seg->s_szc); 4883 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4884 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4885 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4887 return (0); 4888 } 4889 4890 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4891 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4892 if (brkcow == 0) { 4893 if (svd->tr_state == SEGVN_TR_INIT) { 4894 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4895 if (svd->tr_state == SEGVN_TR_INIT) { 4896 ASSERT(svd->vp != NULL && svd->amp == NULL); 4897 ASSERT(svd->flags & MAP_TEXT); 4898 ASSERT(svd->type == MAP_PRIVATE); 4899 segvn_textrepl(seg); 4900 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4901 ASSERT(svd->tr_state != SEGVN_TR_ON || 4902 svd->amp != NULL); 4903 } 4904 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4905 } 4906 } else if (svd->tr_state != SEGVN_TR_OFF) { 4907 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4908 4909 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4910 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4911 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4912 return (FC_PROT); 4913 } 4914 4915 if (svd->tr_state == SEGVN_TR_ON) { 4916 ASSERT(svd->vp != NULL && svd->amp != NULL); 4917 segvn_textunrepl(seg, 0); 4918 ASSERT(svd->amp == NULL && 4919 svd->tr_state == SEGVN_TR_OFF); 4920 } else if (svd->tr_state != SEGVN_TR_OFF) { 4921 svd->tr_state = SEGVN_TR_OFF; 4922 } 4923 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4924 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4925 } 4926 4927 top: 4928 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4929 4930 /* 4931 * If we have the same protections for the entire segment, 4932 * insure that the access being attempted is legitimate. 4933 */ 4934 4935 if (svd->pageprot == 0) { 4936 uint_t protchk; 4937 4938 switch (rw) { 4939 case S_READ: 4940 case S_READ_NOCOW: 4941 protchk = PROT_READ; 4942 break; 4943 case S_WRITE: 4944 protchk = PROT_WRITE; 4945 break; 4946 case S_EXEC: 4947 protchk = PROT_EXEC; 4948 break; 4949 case S_OTHER: 4950 default: 4951 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4952 break; 4953 } 4954 4955 if ((svd->prot & protchk) == 0) { 4956 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4957 return (FC_PROT); /* illegal access type */ 4958 } 4959 } 4960 4961 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4962 /* this must be SOFTLOCK S_READ fault */ 4963 ASSERT(svd->amp == NULL); 4964 ASSERT(svd->tr_state == SEGVN_TR_OFF); 4965 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4966 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4967 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4968 /* 4969 * this must be the first ever non S_READ_NOCOW 4970 * softlock for this segment. 4971 */ 4972 ASSERT(svd->softlockcnt == 0); 4973 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 4974 HAT_REGION_TEXT); 4975 svd->rcookie = HAT_INVALID_REGION_COOKIE; 4976 } 4977 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4978 goto top; 4979 } 4980 4981 /* 4982 * We can't allow the long term use of softlocks for vmpss segments, 4983 * because in some file truncation cases we should be able to demote 4984 * the segment, which requires that there are no softlocks. The 4985 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 4986 * segment is S_READ_NOCOW, where the caller holds the address space 4987 * locked as writer and calls softunlock before dropping the as lock. 4988 * S_READ_NOCOW is used by /proc to read memory from another user. 
4989 * 4990 * Another deadlock between SOFTLOCK and file truncation can happen 4991 * because segvn_fault_vnodepages() calls the FS one pagesize at 4992 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 4993 * can cause a deadlock because the first set of page_t's remain 4994 * locked SE_SHARED. To avoid this, we demote segments on a first 4995 * SOFTLOCK if they have a length greater than the segment's 4996 * page size. 4997 * 4998 * So for now, we only avoid demoting a segment on a SOFTLOCK when 4999 * the access type is S_READ_NOCOW and the fault length is less than 5000 * or equal to the segment's page size. While this is quite restrictive, 5001 * it should be the most common case of SOFTLOCK against a vmpss 5002 * segment. 5003 * 5004 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5005 * caller makes sure no COW will be caused by another thread for a 5006 * softlocked page. 5007 */ 5008 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5009 int demote = 0; 5010 5011 if (rw != S_READ_NOCOW) { 5012 demote = 1; 5013 } 5014 if (!demote && len > PAGESIZE) { 5015 pgsz = page_get_pagesize(seg->s_szc); 5016 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5017 lpgeaddr); 5018 if (lpgeaddr - lpgaddr > pgsz) { 5019 demote = 1; 5020 } 5021 } 5022 5023 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5024 5025 if (demote) { 5026 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5027 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5028 if (seg->s_szc != 0) { 5029 segvn_vmpss_clrszc_cnt++; 5030 ASSERT(svd->softlockcnt == 0); 5031 err = segvn_clrszc(seg); 5032 if (err) { 5033 segvn_vmpss_clrszc_err++; 5034 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5035 return (FC_MAKE_ERR(err)); 5036 } 5037 } 5038 ASSERT(seg->s_szc == 0); 5039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5040 goto top; 5041 } 5042 } 5043 5044 /* 5045 * Check to see if we need to allocate an anon_map structure. 5046 */ 5047 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5048 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5049 /* 5050 * Drop the "read" lock on the segment and acquire 5051 * the "write" version since we have to allocate the 5052 * anon_map. 5053 */ 5054 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5055 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5056 5057 if (svd->amp == NULL) { 5058 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5059 svd->amp->a_szc = seg->s_szc; 5060 } 5061 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5062 5063 /* 5064 * Start all over again since segment protections 5065 * may have changed after we dropped the "read" lock. 5066 */ 5067 goto top; 5068 } 5069 5070 /* 5071 * S_READ_NOCOW vs S_READ distinction was 5072 * only needed for the code above. After 5073 * that we treat it as S_READ. 5074 */ 5075 if (rw == S_READ_NOCOW) { 5076 ASSERT(type == F_SOFTLOCK); 5077 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5078 rw = S_READ; 5079 } 5080 5081 amp = svd->amp; 5082 5083 /* 5084 * MADV_SEQUENTIAL work is ignored for large page segments. 
5085 */ 5086 if (seg->s_szc != 0) { 5087 pgsz = page_get_pagesize(seg->s_szc); 5088 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5089 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5090 if (svd->vp == NULL) { 5091 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5092 lpgeaddr, type, rw, addr, addr + len, brkcow); 5093 } else { 5094 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5095 lpgeaddr, type, rw, addr, addr + len, brkcow); 5096 if (err == IE_RETRY) { 5097 ASSERT(seg->s_szc == 0); 5098 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5099 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5100 goto top; 5101 } 5102 } 5103 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5104 return (err); 5105 } 5106 5107 page = seg_page(seg, addr); 5108 if (amp != NULL) { 5109 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5110 anon_index = svd->anon_index + page; 5111 5112 if (type == F_PROT && rw == S_READ && 5113 svd->tr_state == SEGVN_TR_OFF && 5114 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5115 size_t index = anon_index; 5116 struct anon *ap; 5117 5118 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5119 /* 5120 * The fast path could apply to S_WRITE also, except 5121 * that the protection fault could be caused by lazy 5122 * tlb flush when ro->rw. In this case, the pte is 5123 * RW already. But RO in the other cpu's tlb causes 5124 * the fault. Since hat_chgprot won't do anything if 5125 * pte doesn't change, we may end up faulting 5126 * indefinitely until the RO tlb entry gets replaced. 5127 */ 5128 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5129 anon_array_enter(amp, index, &cookie); 5130 ap = anon_get_ptr(amp->ahp, index); 5131 anon_array_exit(&cookie); 5132 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5133 ANON_LOCK_EXIT(&->a_rwlock); 5134 goto slow; 5135 } 5136 } 5137 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5138 ANON_LOCK_EXIT(&->a_rwlock); 5139 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5140 return (0); 5141 } 5142 } 5143 slow: 5144 5145 if (svd->vpage == NULL) 5146 vpage = NULL; 5147 else 5148 vpage = &svd->vpage[page]; 5149 5150 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5151 5152 /* 5153 * If MADV_SEQUENTIAL has been set for the particular page we 5154 * are faulting on, free behind all pages in the segment and put 5155 * them on the free list. 
5156 */ 5157 5158 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5159 struct vpage *vpp; 5160 ulong_t fanon_index; 5161 size_t fpage; 5162 u_offset_t pgoff, fpgoff; 5163 struct vnode *fvp; 5164 struct anon *fap = NULL; 5165 5166 if (svd->advice == MADV_SEQUENTIAL || 5167 (svd->pageadvice && 5168 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5169 pgoff = off - PAGESIZE; 5170 fpage = page - 1; 5171 if (vpage != NULL) 5172 vpp = &svd->vpage[fpage]; 5173 if (amp != NULL) 5174 fanon_index = svd->anon_index + fpage; 5175 5176 while (pgoff > svd->offset) { 5177 if (svd->advice != MADV_SEQUENTIAL && 5178 (!svd->pageadvice || (vpage && 5179 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5180 break; 5181 5182 /* 5183 * If this is an anon page, we must find the 5184 * correct <vp, offset> for it 5185 */ 5186 fap = NULL; 5187 if (amp != NULL) { 5188 ANON_LOCK_ENTER(&->a_rwlock, 5189 RW_READER); 5190 anon_array_enter(amp, fanon_index, 5191 &cookie); 5192 fap = anon_get_ptr(amp->ahp, 5193 fanon_index); 5194 if (fap != NULL) { 5195 swap_xlate(fap, &fvp, &fpgoff); 5196 } else { 5197 fpgoff = pgoff; 5198 fvp = svd->vp; 5199 } 5200 anon_array_exit(&cookie); 5201 ANON_LOCK_EXIT(&->a_rwlock); 5202 } else { 5203 fpgoff = pgoff; 5204 fvp = svd->vp; 5205 } 5206 if (fvp == NULL) 5207 break; /* XXX */ 5208 /* 5209 * Skip pages that are free or have an 5210 * "exclusive" lock. 5211 */ 5212 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5213 if (pp == NULL) 5214 break; 5215 /* 5216 * We don't need the page_struct_lock to test 5217 * as this is only advisory; even if we 5218 * acquire it someone might race in and lock 5219 * the page after we unlock and before the 5220 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5221 */ 5222 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5223 /* 5224 * Hold the vnode before releasing 5225 * the page lock to prevent it from 5226 * being freed and re-used by some 5227 * other thread. 5228 */ 5229 VN_HOLD(fvp); 5230 page_unlock(pp); 5231 /* 5232 * We should build a page list 5233 * to kluster putpages XXX 5234 */ 5235 (void) VOP_PUTPAGE(fvp, 5236 (offset_t)fpgoff, PAGESIZE, 5237 (B_DONTNEED|B_FREE|B_ASYNC), 5238 svd->cred, NULL); 5239 VN_RELE(fvp); 5240 } else { 5241 /* 5242 * XXX - Should the loop terminate if 5243 * the page is `locked'? 5244 */ 5245 page_unlock(pp); 5246 } 5247 --vpp; 5248 --fanon_index; 5249 pgoff -= PAGESIZE; 5250 } 5251 } 5252 } 5253 5254 plp = pl; 5255 *plp = NULL; 5256 pl_alloc_sz = 0; 5257 5258 /* 5259 * See if we need to call VOP_GETPAGE for 5260 * *any* of the range being faulted on. 5261 * We can skip all of this work if there 5262 * was no original vnode. 5263 */ 5264 if (svd->vp != NULL) { 5265 u_offset_t vp_off; 5266 size_t vp_len; 5267 struct anon *ap; 5268 vnode_t *vp; 5269 5270 vp_off = off; 5271 vp_len = len; 5272 5273 if (amp == NULL) 5274 dogetpage = 1; 5275 else { 5276 /* 5277 * Only acquire reader lock to prevent amp->ahp 5278 * from being changed. 
It's ok to miss pages, 5279 * hence we don't do anon_array_enter 5280 */ 5281 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5282 ap = anon_get_ptr(amp->ahp, anon_index); 5283 5284 if (len <= PAGESIZE) 5285 /* inline non_anon() */ 5286 dogetpage = (ap == NULL); 5287 else 5288 dogetpage = non_anon(amp->ahp, anon_index, 5289 &vp_off, &vp_len); 5290 ANON_LOCK_EXIT(&->a_rwlock); 5291 } 5292 5293 if (dogetpage) { 5294 enum seg_rw arw; 5295 struct as *as = seg->s_as; 5296 5297 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5298 /* 5299 * Page list won't fit in local array, 5300 * allocate one of the needed size. 5301 */ 5302 pl_alloc_sz = 5303 (btop(len) + 1) * sizeof (page_t *); 5304 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5305 plp[0] = NULL; 5306 plsz = len; 5307 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5308 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5309 (((size_t)(addr + PAGESIZE) < 5310 (size_t)(seg->s_base + seg->s_size)) && 5311 hat_probe(as->a_hat, addr + PAGESIZE))) { 5312 /* 5313 * Ask VOP_GETPAGE to return the exact number 5314 * of pages if 5315 * (a) this is a COW fault, or 5316 * (b) this is a software fault, or 5317 * (c) next page is already mapped. 5318 */ 5319 plsz = len; 5320 } else { 5321 /* 5322 * Ask VOP_GETPAGE to return adjacent pages 5323 * within the segment. 5324 */ 5325 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5326 ((seg->s_base + seg->s_size) - addr)); 5327 ASSERT((addr + plsz) <= 5328 (seg->s_base + seg->s_size)); 5329 } 5330 5331 /* 5332 * Need to get some non-anonymous pages. 5333 * We need to make only one call to GETPAGE to do 5334 * this to prevent certain deadlocking conditions 5335 * when we are doing locking. In this case 5336 * non_anon() should have picked up the smallest 5337 * range which includes all the non-anonymous 5338 * pages in the requested range. We have to 5339 * be careful regarding which rw flag to pass in 5340 * because on a private mapping, the underlying 5341 * object is never allowed to be written. 5342 */ 5343 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5344 arw = S_READ; 5345 } else { 5346 arw = rw; 5347 } 5348 vp = svd->vp; 5349 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5350 "segvn_getpage:seg %p addr %p vp %p", 5351 seg, addr, vp); 5352 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5353 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5354 svd->cred, NULL); 5355 if (err) { 5356 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5357 segvn_pagelist_rele(plp); 5358 if (pl_alloc_sz) 5359 kmem_free(plp, pl_alloc_sz); 5360 return (FC_MAKE_ERR(err)); 5361 } 5362 if (svd->type == MAP_PRIVATE) 5363 vpprot &= ~PROT_WRITE; 5364 } 5365 } 5366 5367 /* 5368 * N.B. at this time the plp array has all the needed non-anon 5369 * pages in addition to (possibly) having some adjacent pages. 5370 */ 5371 5372 /* 5373 * Always acquire the anon_array_lock to prevent 5374 * 2 threads from allocating separate anon slots for 5375 * the same "addr". 5376 * 5377 * If this is a copy-on-write fault and we don't already 5378 * have the anon_array_lock, acquire it to prevent the 5379 * fault routine from handling multiple copy-on-write faults 5380 * on the same "addr" in the same address space. 5381 * 5382 * Only one thread should deal with the fault since after 5383 * it is handled, the other threads can acquire a translation 5384 * to the newly created private page. This prevents two or 5385 * more threads from creating different private pages for the 5386 * same fault. 
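 *
 * (Illustrative sketch, not original text: the per-slot
 * serialization follows the usual bracket pattern used
 * throughout this file,
 *
 *	anon_array_enter(amp, anon_index, &cookie);
 *	ap = anon_get_ptr(amp->ahp, anon_index);
 *	... resolve or allocate the private page ...
 *	anon_array_exit(&cookie);
 *
 * so only one thread works on a given anon slot at a time.)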
5387 * 5388 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5389 * to prevent deadlock between this thread and another thread 5390 * which has soft-locked this page and wants to acquire serial_lock. 5391 * ( bug 4026339 ) 5392 * 5393 * The fix for bug 4026339 becomes unnecessary when using the 5394 * locking scheme with per amp rwlock and a global set of hash 5395 * lock, anon_array_lock. If we steal a vnode page when low 5396 * on memory and upgrad the page lock through page_rename, 5397 * then the page is PAGE_HANDLED, nothing needs to be done 5398 * for this page after returning from segvn_faultpage. 5399 * 5400 * But really, the page lock should be downgraded after 5401 * the stolen page is page_rename'd. 5402 */ 5403 5404 if (amp != NULL) 5405 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5406 5407 /* 5408 * Ok, now loop over the address range and handle faults 5409 */ 5410 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5411 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5412 type, rw, brkcow); 5413 if (err) { 5414 if (amp != NULL) 5415 ANON_LOCK_EXIT(&->a_rwlock); 5416 if (type == F_SOFTLOCK && a > addr) { 5417 segvn_softunlock(seg, addr, (a - addr), 5418 S_OTHER); 5419 } 5420 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5421 segvn_pagelist_rele(plp); 5422 if (pl_alloc_sz) 5423 kmem_free(plp, pl_alloc_sz); 5424 return (err); 5425 } 5426 if (vpage) { 5427 vpage++; 5428 } else if (svd->vpage) { 5429 page = seg_page(seg, addr); 5430 vpage = &svd->vpage[++page]; 5431 } 5432 } 5433 5434 /* Didn't get pages from the underlying fs so we're done */ 5435 if (!dogetpage) 5436 goto done; 5437 5438 /* 5439 * Now handle any other pages in the list returned. 5440 * If the page can be used, load up the translations now. 5441 * Note that the for loop will only be entered if "plp" 5442 * is pointing to a non-NULL page pointer which means that 5443 * VOP_GETPAGE() was called and vpprot has been initialized. 5444 */ 5445 if (svd->pageprot == 0) 5446 prot = svd->prot & vpprot; 5447 5448 5449 /* 5450 * Large Files: diff should be unsigned value because we started 5451 * supporting > 2GB segment sizes from 2.5.1 and when a 5452 * large file of size > 2GB gets mapped to address space 5453 * the diff value can be > 2GB. 5454 */ 5455 5456 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5457 size_t diff; 5458 struct anon *ap; 5459 int anon_index; 5460 anon_sync_obj_t cookie; 5461 int hat_flag = HAT_LOAD_ADV; 5462 5463 if (svd->flags & MAP_TEXT) { 5464 hat_flag |= HAT_LOAD_TEXT; 5465 } 5466 5467 if (pp == PAGE_HANDLED) 5468 continue; 5469 5470 if (svd->tr_state != SEGVN_TR_ON && 5471 pp->p_offset >= svd->offset && 5472 pp->p_offset < svd->offset + seg->s_size) { 5473 5474 diff = pp->p_offset - svd->offset; 5475 5476 /* 5477 * Large Files: Following is the assertion 5478 * validating the above cast. 5479 */ 5480 ASSERT(svd->vp == pp->p_vnode); 5481 5482 page = btop(diff); 5483 if (svd->pageprot) 5484 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5485 5486 /* 5487 * Prevent other threads in the address space from 5488 * creating private pages (i.e., allocating anon slots) 5489 * while we are in the process of loading translations 5490 * to additional pages returned by the underlying 5491 * object. 
5492 */ 5493 if (amp != NULL) { 5494 anon_index = svd->anon_index + page; 5495 anon_array_enter(amp, anon_index, &cookie); 5496 ap = anon_get_ptr(amp->ahp, anon_index); 5497 } 5498 if ((amp == NULL) || (ap == NULL)) { 5499 if (IS_VMODSORT(pp->p_vnode) || 5500 enable_mbit_wa) { 5501 if (rw == S_WRITE) 5502 hat_setmod(pp); 5503 else if (rw != S_OTHER && 5504 !hat_ismod(pp)) 5505 prot &= ~PROT_WRITE; 5506 } 5507 /* 5508 * Skip mapping read ahead pages marked 5509 * for migration, so they will get migrated 5510 * properly on fault 5511 */ 5512 ASSERT(amp == NULL || 5513 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5514 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5515 hat_memload_region(hat, 5516 seg->s_base + diff, 5517 pp, prot, hat_flag, 5518 svd->rcookie); 5519 } 5520 } 5521 if (amp != NULL) 5522 anon_array_exit(&cookie); 5523 } 5524 page_unlock(pp); 5525 } 5526 done: 5527 if (amp != NULL) 5528 ANON_LOCK_EXIT(&->a_rwlock); 5529 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5530 if (pl_alloc_sz) 5531 kmem_free(plp, pl_alloc_sz); 5532 return (0); 5533 } 5534 5535 /* 5536 * This routine is used to start I/O on pages asynchronously. XXX it will 5537 * only create PAGESIZE pages. At fault time they will be relocated into 5538 * larger pages. 5539 */ 5540 static faultcode_t 5541 segvn_faulta(struct seg *seg, caddr_t addr) 5542 { 5543 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5544 int err; 5545 struct anon_map *amp; 5546 vnode_t *vp; 5547 5548 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5549 5550 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5551 if ((amp = svd->amp) != NULL) { 5552 struct anon *ap; 5553 5554 /* 5555 * Reader lock to prevent amp->ahp from being changed. 5556 * This is advisory, it's ok to miss a page, so 5557 * we don't do anon_array_enter lock. 
5558 */ 5559 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5560 if ((ap = anon_get_ptr(amp->ahp, 5561 svd->anon_index + seg_page(seg, addr))) != NULL) { 5562 5563 err = anon_getpage(&ap, NULL, NULL, 5564 0, seg, addr, S_READ, svd->cred); 5565 5566 ANON_LOCK_EXIT(&->a_rwlock); 5567 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5568 if (err) 5569 return (FC_MAKE_ERR(err)); 5570 return (0); 5571 } 5572 ANON_LOCK_EXIT(&->a_rwlock); 5573 } 5574 5575 if (svd->vp == NULL) { 5576 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5577 return (0); /* zfod page - do nothing now */ 5578 } 5579 5580 vp = svd->vp; 5581 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5582 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5583 err = VOP_GETPAGE(vp, 5584 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5585 PAGESIZE, NULL, NULL, 0, seg, addr, 5586 S_OTHER, svd->cred, NULL); 5587 5588 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5589 if (err) 5590 return (FC_MAKE_ERR(err)); 5591 return (0); 5592 } 5593 5594 static int 5595 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5596 { 5597 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5598 struct vpage *cvp, *svp, *evp; 5599 struct vnode *vp; 5600 size_t pgsz; 5601 pgcnt_t pgcnt; 5602 anon_sync_obj_t cookie; 5603 int unload_done = 0; 5604 5605 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5606 5607 if ((svd->maxprot & prot) != prot) 5608 return (EACCES); /* violated maxprot */ 5609 5610 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5611 5612 /* return if prot is the same */ 5613 if (!svd->pageprot && svd->prot == prot) { 5614 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5615 return (0); 5616 } 5617 5618 /* 5619 * Since we change protections we first have to flush the cache. 5620 * This makes sure all the pagelock calls have to recheck 5621 * protections. 5622 */ 5623 if (svd->softlockcnt > 0) { 5624 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5625 5626 /* 5627 * If this is shared segment non 0 softlockcnt 5628 * means locked pages are still in use. 5629 */ 5630 if (svd->type == MAP_SHARED) { 5631 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5632 return (EAGAIN); 5633 } 5634 5635 /* 5636 * Since we do have the segvn writers lock nobody can fill 5637 * the cache with entries belonging to this seg during 5638 * the purge. The flush either succeeds or we still have 5639 * pending I/Os. 
5640 */ 5641 segvn_purge(seg); 5642 if (svd->softlockcnt > 0) { 5643 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5644 return (EAGAIN); 5645 } 5646 } 5647 5648 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5649 ASSERT(svd->amp == NULL); 5650 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5651 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5652 HAT_REGION_TEXT); 5653 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5654 unload_done = 1; 5655 } else if (svd->tr_state == SEGVN_TR_INIT) { 5656 svd->tr_state = SEGVN_TR_OFF; 5657 } else if (svd->tr_state == SEGVN_TR_ON) { 5658 ASSERT(svd->amp != NULL); 5659 segvn_textunrepl(seg, 0); 5660 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5661 unload_done = 1; 5662 } 5663 5664 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5665 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5666 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5667 segvn_inval_trcache(svd->vp); 5668 } 5669 if (seg->s_szc != 0) { 5670 int err; 5671 pgsz = page_get_pagesize(seg->s_szc); 5672 pgcnt = pgsz >> PAGESHIFT; 5673 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5674 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5675 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5676 ASSERT(seg->s_base != addr || seg->s_size != len); 5677 /* 5678 * If we are holding the as lock as a reader then 5679 * we need to return IE_RETRY and let the as 5680 * layer drop and re-acquire the lock as a writer. 5681 */ 5682 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5683 return (IE_RETRY); 5684 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5685 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5686 err = segvn_demote_range(seg, addr, len, 5687 SDR_END, 0); 5688 } else { 5689 uint_t szcvec = map_pgszcvec(seg->s_base, 5690 pgsz, (uintptr_t)seg->s_base, 5691 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5692 err = segvn_demote_range(seg, addr, len, 5693 SDR_END, szcvec); 5694 } 5695 if (err == 0) 5696 return (IE_RETRY); 5697 if (err == ENOMEM) 5698 return (IE_NOMEM); 5699 return (err); 5700 } 5701 } 5702 5703 5704 /* 5705 * If it's a private mapping and we're making it writable then we 5706 * may have to reserve the additional swap space now. If we are 5707 * making writable only a part of the segment then we use its vpage 5708 * array to keep a record of the pages for which we have reserved 5709 * swap. In this case we set the pageswap field in the segment's 5710 * segvn structure to record this. 5711 * 5712 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5713 * removing write permission on the entire segment and we haven't 5714 * modified any pages, we can release the swap space. 5715 */ 5716 if (svd->type == MAP_PRIVATE) { 5717 if (prot & PROT_WRITE) { 5718 if (!(svd->flags & MAP_NORESERVE) && 5719 !(svd->swresv && svd->pageswap == 0)) { 5720 size_t sz = 0; 5721 5722 /* 5723 * Start by determining how much swap 5724 * space is required. 5725 */ 5726 if (addr == seg->s_base && 5727 len == seg->s_size && 5728 svd->pageswap == 0) { 5729 /* The whole segment */ 5730 sz = seg->s_size; 5731 } else { 5732 /* 5733 * Make sure that the vpage array 5734 * exists, and make a note of the 5735 * range of elements corresponding 5736 * to len. 5737 */ 5738 segvn_vpage(seg); 5739 svp = &svd->vpage[seg_page(seg, addr)]; 5740 evp = &svd->vpage[seg_page(seg, 5741 addr + len)]; 5742 5743 if (svd->pageswap == 0) { 5744 /* 5745 * This is the first time we've 5746 * asked for a part of this 5747 * segment, so we need to 5748 * reserve everything we've 5749 * been asked for. 
5750 */ 5751 sz = len; 5752 } else { 5753 /* 5754 * We have to count the number 5755 * of pages required. 5756 */ 5757 for (cvp = svp; cvp < evp; 5758 cvp++) { 5759 if (!VPP_ISSWAPRES(cvp)) 5760 sz++; 5761 } 5762 sz <<= PAGESHIFT; 5763 } 5764 } 5765 5766 /* Try to reserve the necessary swap. */ 5767 if (anon_resv_zone(sz, 5768 seg->s_as->a_proc->p_zone) == 0) { 5769 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5770 return (IE_NOMEM); 5771 } 5772 5773 /* 5774 * Make a note of how much swap space 5775 * we've reserved. 5776 */ 5777 if (svd->pageswap == 0 && sz == seg->s_size) { 5778 svd->swresv = sz; 5779 } else { 5780 ASSERT(svd->vpage != NULL); 5781 svd->swresv += sz; 5782 svd->pageswap = 1; 5783 for (cvp = svp; cvp < evp; cvp++) { 5784 if (!VPP_ISSWAPRES(cvp)) 5785 VPP_SETSWAPRES(cvp); 5786 } 5787 } 5788 } 5789 } else { 5790 /* 5791 * Swap space is released only if this segment 5792 * does not map anonymous memory, since read faults 5793 * on such segments still need an anon slot to read 5794 * in the data. 5795 */ 5796 if (svd->swresv != 0 && svd->vp != NULL && 5797 svd->amp == NULL && addr == seg->s_base && 5798 len == seg->s_size && svd->pageprot == 0) { 5799 ASSERT(svd->pageswap == 0); 5800 anon_unresv_zone(svd->swresv, 5801 seg->s_as->a_proc->p_zone); 5802 svd->swresv = 0; 5803 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5804 "anon proc:%p %lu %u", seg, 0, 0); 5805 } 5806 } 5807 } 5808 5809 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5810 if (svd->prot == prot) { 5811 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5812 return (0); /* all done */ 5813 } 5814 svd->prot = (uchar_t)prot; 5815 } else if (svd->type == MAP_PRIVATE) { 5816 struct anon *ap = NULL; 5817 page_t *pp; 5818 u_offset_t offset, off; 5819 struct anon_map *amp; 5820 ulong_t anon_idx = 0; 5821 5822 /* 5823 * A vpage structure exists or else the change does not 5824 * involve the entire segment. Establish a vpage structure 5825 * if none is there. Then, for each page in the range, 5826 * adjust its individual permissions. Note that write- 5827 * enabling a MAP_PRIVATE page can affect the claims for 5828 * locked down memory. Overcommitting memory terminates 5829 * the operation. 5830 */ 5831 segvn_vpage(seg); 5832 svd->pageprot = 1; 5833 if ((amp = svd->amp) != NULL) { 5834 anon_idx = svd->anon_index + seg_page(seg, addr); 5835 ASSERT(seg->s_szc == 0 || 5836 IS_P2ALIGNED(anon_idx, pgcnt)); 5837 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5838 } 5839 5840 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5841 evp = &svd->vpage[seg_page(seg, addr + len)]; 5842 5843 /* 5844 * See Statement at the beginning of segvn_lockop regarding 5845 * the way cowcnts and lckcnts are handled. 
5846 */ 5847 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5848 5849 if (seg->s_szc != 0) { 5850 if (amp != NULL) { 5851 anon_array_enter(amp, anon_idx, 5852 &cookie); 5853 } 5854 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5855 !segvn_claim_pages(seg, svp, offset, 5856 anon_idx, prot)) { 5857 if (amp != NULL) { 5858 anon_array_exit(&cookie); 5859 } 5860 break; 5861 } 5862 if (amp != NULL) { 5863 anon_array_exit(&cookie); 5864 } 5865 anon_idx++; 5866 } else { 5867 if (amp != NULL) { 5868 anon_array_enter(amp, anon_idx, 5869 &cookie); 5870 ap = anon_get_ptr(amp->ahp, anon_idx++); 5871 } 5872 5873 if (VPP_ISPPLOCK(svp) && 5874 VPP_PROT(svp) != prot) { 5875 5876 if (amp == NULL || ap == NULL) { 5877 vp = svd->vp; 5878 off = offset; 5879 } else 5880 swap_xlate(ap, &vp, &off); 5881 if (amp != NULL) 5882 anon_array_exit(&cookie); 5883 5884 if ((pp = page_lookup(vp, off, 5885 SE_SHARED)) == NULL) { 5886 panic("segvn_setprot: no page"); 5887 /*NOTREACHED*/ 5888 } 5889 ASSERT(seg->s_szc == 0); 5890 if ((VPP_PROT(svp) ^ prot) & 5891 PROT_WRITE) { 5892 if (prot & PROT_WRITE) { 5893 if (!page_addclaim( 5894 pp)) { 5895 page_unlock(pp); 5896 break; 5897 } 5898 } else { 5899 if (!page_subclaim( 5900 pp)) { 5901 page_unlock(pp); 5902 break; 5903 } 5904 } 5905 } 5906 page_unlock(pp); 5907 } else if (amp != NULL) 5908 anon_array_exit(&cookie); 5909 } 5910 VPP_SETPROT(svp, prot); 5911 offset += PAGESIZE; 5912 } 5913 if (amp != NULL) 5914 ANON_LOCK_EXIT(&->a_rwlock); 5915 5916 /* 5917 * Did we terminate prematurely? If so, simply unload 5918 * the translations to the things we've updated so far. 5919 */ 5920 if (svp != evp) { 5921 if (unload_done) { 5922 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5923 return (IE_NOMEM); 5924 } 5925 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5926 PAGESIZE; 5927 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5928 if (len != 0) 5929 hat_unload(seg->s_as->a_hat, addr, 5930 len, HAT_UNLOAD); 5931 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5932 return (IE_NOMEM); 5933 } 5934 } else { 5935 segvn_vpage(seg); 5936 svd->pageprot = 1; 5937 evp = &svd->vpage[seg_page(seg, addr + len)]; 5938 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5939 VPP_SETPROT(svp, prot); 5940 } 5941 } 5942 5943 if (unload_done) { 5944 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5945 return (0); 5946 } 5947 5948 if (((prot & PROT_WRITE) != 0 && 5949 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 5950 (prot & ~PROT_USER) == PROT_NONE) { 5951 /* 5952 * Either private or shared data with write access (in 5953 * which case we need to throw out all former translations 5954 * so that we get the right translations set up on fault 5955 * and we don't allow write access to any copy-on-write pages 5956 * that might be around or to prevent write access to pages 5957 * representing holes in a file), or we don't have permission 5958 * to access the memory at all (in which case we have to 5959 * unload any current translations that might exist). 5960 */ 5961 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 5962 } else { 5963 /* 5964 * A shared mapping or a private mapping in which write 5965 * protection is going to be denied - just change all the 5966 * protections over the range of addresses in question. 5967 * segvn does not support any other attributes other 5968 * than prot so we can use hat_chgattr. 
5969 */ 5970 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 5971 } 5972 5973 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5974 5975 return (0); 5976 } 5977 5978 /* 5979 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 5980 * to determine if the seg is capable of mapping the requested szc. 5981 */ 5982 static int 5983 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 5984 { 5985 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5986 struct segvn_data *nsvd; 5987 struct anon_map *amp = svd->amp; 5988 struct seg *nseg; 5989 caddr_t eaddr = addr + len, a; 5990 size_t pgsz = page_get_pagesize(szc); 5991 pgcnt_t pgcnt = page_get_pagecnt(szc); 5992 int err; 5993 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 5994 extern struct vnode kvp; 5995 5996 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5997 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 5998 5999 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6000 return (0); 6001 } 6002 6003 /* 6004 * addr should always be pgsz aligned but eaddr may be misaligned if 6005 * it's at the end of the segment. 6006 * 6007 * XXX we should assert this condition since as_setpagesize() logic 6008 * guarantees it. 6009 */ 6010 if (!IS_P2ALIGNED(addr, pgsz) || 6011 (!IS_P2ALIGNED(eaddr, pgsz) && 6012 eaddr != seg->s_base + seg->s_size)) { 6013 6014 segvn_setpgsz_align_err++; 6015 return (EINVAL); 6016 } 6017 6018 if (amp != NULL && svd->type == MAP_SHARED) { 6019 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6020 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6021 6022 segvn_setpgsz_anon_align_err++; 6023 return (EINVAL); 6024 } 6025 } 6026 6027 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6028 szc > segvn_maxpgszc) { 6029 return (EINVAL); 6030 } 6031 6032 /* paranoid check */ 6033 if (svd->vp != NULL && 6034 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6035 return (EINVAL); 6036 } 6037 6038 if (seg->s_szc == 0 && svd->vp != NULL && 6039 map_addr_vacalign_check(addr, off)) { 6040 return (EINVAL); 6041 } 6042 6043 /* 6044 * Check that protections are the same within new page 6045 * size boundaries. 6046 */ 6047 if (svd->pageprot) { 6048 for (a = addr; a < eaddr; a += pgsz) { 6049 if ((a + pgsz) > eaddr) { 6050 if (!sameprot(seg, a, eaddr - a)) { 6051 return (EINVAL); 6052 } 6053 } else { 6054 if (!sameprot(seg, a, pgsz)) { 6055 return (EINVAL); 6056 } 6057 } 6058 } 6059 } 6060 6061 /* 6062 * Since we are changing page size we first have to flush 6063 * the cache. This makes sure all the pagelock calls have 6064 * to recheck protections. 6065 */ 6066 if (svd->softlockcnt > 0) { 6067 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6068 6069 /* 6070 * If this is shared segment non 0 softlockcnt 6071 * means locked pages are still in use. 6072 */ 6073 if (svd->type == MAP_SHARED) { 6074 return (EAGAIN); 6075 } 6076 6077 /* 6078 * Since we do have the segvn writers lock nobody can fill 6079 * the cache with entries belonging to this seg during 6080 * the purge. The flush either succeeds or we still have 6081 * pending I/Os. 
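 * In the latter case the page size change fails with EAGAIN
 * and the caller may retry it later.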
6082 */ 6083 segvn_purge(seg); 6084 if (svd->softlockcnt > 0) { 6085 return (EAGAIN); 6086 } 6087 } 6088 6089 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6090 ASSERT(svd->amp == NULL); 6091 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6092 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6093 HAT_REGION_TEXT); 6094 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6095 } else if (svd->tr_state == SEGVN_TR_INIT) { 6096 svd->tr_state = SEGVN_TR_OFF; 6097 } else if (svd->tr_state == SEGVN_TR_ON) { 6098 ASSERT(svd->amp != NULL); 6099 segvn_textunrepl(seg, 1); 6100 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6101 amp = NULL; 6102 } 6103 6104 /* 6105 * Operation for sub range of existing segment. 6106 */ 6107 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6108 if (szc < seg->s_szc) { 6109 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6110 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6111 if (err == 0) { 6112 return (IE_RETRY); 6113 } 6114 if (err == ENOMEM) { 6115 return (IE_NOMEM); 6116 } 6117 return (err); 6118 } 6119 if (addr != seg->s_base) { 6120 nseg = segvn_split_seg(seg, addr); 6121 if (eaddr != (nseg->s_base + nseg->s_size)) { 6122 /* eaddr is szc aligned */ 6123 (void) segvn_split_seg(nseg, eaddr); 6124 } 6125 return (IE_RETRY); 6126 } 6127 if (eaddr != (seg->s_base + seg->s_size)) { 6128 /* eaddr is szc aligned */ 6129 (void) segvn_split_seg(seg, eaddr); 6130 } 6131 return (IE_RETRY); 6132 } 6133 6134 /* 6135 * Break any low level sharing and reset seg->s_szc to 0. 6136 */ 6137 if ((err = segvn_clrszc(seg)) != 0) { 6138 if (err == ENOMEM) { 6139 err = IE_NOMEM; 6140 } 6141 return (err); 6142 } 6143 ASSERT(seg->s_szc == 0); 6144 6145 /* 6146 * If the end of the current segment is not pgsz aligned 6147 * then attempt to concatenate with the next segment. 6148 */ 6149 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6150 nseg = AS_SEGNEXT(seg->s_as, seg); 6151 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6152 return (ENOMEM); 6153 } 6154 if (nseg->s_ops != &segvn_ops) { 6155 return (EINVAL); 6156 } 6157 nsvd = (struct segvn_data *)nseg->s_data; 6158 if (nsvd->softlockcnt > 0) { 6159 /* 6160 * If this is shared segment non 0 softlockcnt 6161 * means locked pages are still in use. 6162 */ 6163 if (nsvd->type == MAP_SHARED) { 6164 return (EAGAIN); 6165 } 6166 segvn_purge(nseg); 6167 if (nsvd->softlockcnt > 0) { 6168 return (EAGAIN); 6169 } 6170 } 6171 err = segvn_clrszc(nseg); 6172 if (err == ENOMEM) { 6173 err = IE_NOMEM; 6174 } 6175 if (err != 0) { 6176 return (err); 6177 } 6178 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6179 err = segvn_concat(seg, nseg, 1); 6180 if (err == -1) { 6181 return (EINVAL); 6182 } 6183 if (err == -2) { 6184 return (IE_NOMEM); 6185 } 6186 return (IE_RETRY); 6187 } 6188 6189 /* 6190 * May need to re-align anon array to 6191 * new szc. 
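 * For a MAP_PRIVATE segment whose anon_index is not aligned to
 * the new large page count, the anon pointers are copied into a
 * freshly allocated anon_hdr so that anon_index can be reset to
 * 0; the allocation and copy use ANON_NOSLEEP, so failure is
 * reported as IE_NOMEM.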
6192 */
6193 if (amp != NULL) {
6194 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6195 struct anon_hdr *nahp;
6196 
6197 ASSERT(svd->type == MAP_PRIVATE);
6198 
6199 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6200 ASSERT(amp->refcnt == 1);
6201 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6202 if (nahp == NULL) {
6203 ANON_LOCK_EXIT(&amp->a_rwlock);
6204 return (IE_NOMEM);
6205 }
6206 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6207 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6208 anon_release(nahp, btop(amp->size));
6209 ANON_LOCK_EXIT(&amp->a_rwlock);
6210 return (IE_NOMEM);
6211 }
6212 anon_release(amp->ahp, btop(amp->size));
6213 amp->ahp = nahp;
6214 svd->anon_index = 0;
6215 ANON_LOCK_EXIT(&amp->a_rwlock);
6216 }
6217 }
6218 if (svd->vp != NULL && szc != 0) {
6219 struct vattr va;
6220 u_offset_t eoffpage = svd->offset;
6221 va.va_mask = AT_SIZE;
6222 eoffpage += seg->s_size;
6223 eoffpage = btopr(eoffpage);
6224 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6225 segvn_setpgsz_getattr_err++;
6226 return (EINVAL);
6227 }
6228 if (btopr(va.va_size) < eoffpage) {
6229 segvn_setpgsz_eof_err++;
6230 return (EINVAL);
6231 }
6232 if (amp != NULL) {
6233 /*
6234 * anon_fill_cow_holes() may call VOP_GETPAGE().
6235 * Don't take the anon map lock here to avoid holding it
6236 * across VOP_GETPAGE() calls that may call back into
6237 * segvn for klustering checks. We don't really need the
6238 * anon map lock here since it's a private segment and
6239 * we hold the as-level lock as writers.
6240 */
6241 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6242 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6243 seg->s_size, szc, svd->prot, svd->vpage,
6244 svd->cred)) != 0) {
6245 return (EINVAL);
6246 }
6247 }
6248 segvn_setvnode_mpss(svd->vp);
6249 }
6250 
6251 if (amp != NULL) {
6252 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6253 if (svd->type == MAP_PRIVATE) {
6254 amp->a_szc = szc;
6255 } else if (szc > amp->a_szc) {
6256 amp->a_szc = szc;
6257 }
6258 ANON_LOCK_EXIT(&amp->a_rwlock);
6259 }
6260 
6261 seg->s_szc = szc;
6262 
6263 return (0);
6264 }
6265 
6266 static int
6267 segvn_clrszc(struct seg *seg)
6268 {
6269 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6270 struct anon_map *amp = svd->amp;
6271 size_t pgsz;
6272 pgcnt_t pages;
6273 int err = 0;
6274 caddr_t a = seg->s_base;
6275 caddr_t ea = a + seg->s_size;
6276 ulong_t an_idx = svd->anon_index;
6277 vnode_t *vp = svd->vp;
6278 struct vpage *vpage = svd->vpage;
6279 page_t *anon_pl[1 + 1], *pp;
6280 struct anon *ap, *oldap;
6281 uint_t prot = svd->prot, vpprot;
6282 int pageflag = 0;
6283 
6284 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6285 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6286 ASSERT(svd->softlockcnt == 0);
6287 
6288 if (vp == NULL && amp == NULL) {
6289 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6290 seg->s_szc = 0;
6291 return (0);
6292 }
6293 
6294 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6295 ASSERT(svd->amp == NULL);
6296 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6297 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6298 HAT_REGION_TEXT);
6299 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6300 } else if (svd->tr_state == SEGVN_TR_ON) {
6301 ASSERT(svd->amp != NULL);
6302 segvn_textunrepl(seg, 1);
6303 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6304 amp = NULL;
6305 } else {
6306 if (svd->tr_state != SEGVN_TR_OFF) {
6307 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6308 svd->tr_state = SEGVN_TR_OFF;
6309 }
6310 
6311 /*
6312 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6313 * unload argument is 0 when we are freeing the segment
6314 * and unload was already done.
6315 */
6316 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6317 HAT_UNLOAD_UNMAP);
6318 }
6319 
6320 if (amp == NULL || svd->type == MAP_SHARED) {
6321 seg->s_szc = 0;
6322 return (0);
6323 }
6324 
6325 pgsz = page_get_pagesize(seg->s_szc);
6326 pages = btop(pgsz);
6327 
6328 /*
6329 * XXX anon rwlock is not really needed because this is a
6330 * private segment and we are writers.
6331 */
6332 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6333 
6334 for (; a < ea; a += pgsz, an_idx += pages) {
6335 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6336 ASSERT(vpage != NULL || svd->pageprot == 0);
6337 if (vpage != NULL) {
6338 ASSERT(sameprot(seg, a, pgsz));
6339 prot = VPP_PROT(vpage);
6340 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6341 }
6342 if (seg->s_szc != 0) {
6343 ASSERT(vp == NULL || anon_pages(amp->ahp,
6344 an_idx, pages) == pages);
6345 if ((err = anon_map_demotepages(amp, an_idx,
6346 seg, a, prot, vpage, svd->cred)) != 0) {
6347 goto out;
6348 }
6349 } else {
6350 if (oldap->an_refcnt == 1) {
6351 continue;
6352 }
6353 if ((err = anon_getpage(&oldap, &vpprot,
6354 anon_pl, PAGESIZE, seg, a, S_READ,
6355 svd->cred))) {
6356 goto out;
6357 }
6358 if ((pp = anon_private(&ap, seg, a, prot,
6359 anon_pl[0], pageflag, svd->cred)) == NULL) {
6360 err = ENOMEM;
6361 goto out;
6362 }
6363 anon_decref(oldap);
6364 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6365 ANON_SLEEP);
6366 page_unlock(pp);
6367 }
6368 }
6369 vpage = (vpage == NULL) ? NULL : vpage + pages;
6370 }
6371 
6372 amp->a_szc = 0;
6373 seg->s_szc = 0;
6374 out:
6375 ANON_LOCK_EXIT(&amp->a_rwlock);
6376 return (err);
6377 }
6378 
6379 static int
6380 segvn_claim_pages(
6381 struct seg *seg,
6382 struct vpage *svp,
6383 u_offset_t off,
6384 ulong_t anon_idx,
6385 uint_t prot)
6386 {
6387 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6388 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6389 page_t **ppa;
6390 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6391 struct anon_map *amp = svd->amp;
6392 struct vpage *evp = svp + pgcnt;
6393 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6394 + seg->s_base;
6395 struct anon *ap;
6396 struct vnode *vp = svd->vp;
6397 page_t *pp;
6398 pgcnt_t pg_idx, i;
6399 int err = 0;
6400 anoff_t aoff;
6401 int anon = (amp != NULL) ?
1 : 0; 6402 6403 ASSERT(svd->type == MAP_PRIVATE); 6404 ASSERT(svd->vpage != NULL); 6405 ASSERT(seg->s_szc != 0); 6406 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6407 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6408 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6409 6410 if (VPP_PROT(svp) == prot) 6411 return (1); 6412 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6413 return (1); 6414 6415 ppa = kmem_alloc(ppasize, KM_SLEEP); 6416 if (anon && vp != NULL) { 6417 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6418 anon = 0; 6419 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6420 } 6421 ASSERT(!anon || 6422 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6423 } 6424 6425 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6426 if (!VPP_ISPPLOCK(svp)) 6427 continue; 6428 if (anon) { 6429 ap = anon_get_ptr(amp->ahp, anon_idx); 6430 if (ap == NULL) { 6431 panic("segvn_claim_pages: no anon slot"); 6432 } 6433 swap_xlate(ap, &vp, &aoff); 6434 off = (u_offset_t)aoff; 6435 } 6436 ASSERT(vp != NULL); 6437 if ((pp = page_lookup(vp, 6438 (u_offset_t)off, SE_SHARED)) == NULL) { 6439 panic("segvn_claim_pages: no page"); 6440 } 6441 ppa[pg_idx++] = pp; 6442 off += PAGESIZE; 6443 } 6444 6445 if (ppa[0] == NULL) { 6446 kmem_free(ppa, ppasize); 6447 return (1); 6448 } 6449 6450 ASSERT(pg_idx <= pgcnt); 6451 ppa[pg_idx] = NULL; 6452 6453 if (prot & PROT_WRITE) 6454 err = page_addclaim_pages(ppa); 6455 else 6456 err = page_subclaim_pages(ppa); 6457 6458 for (i = 0; i < pg_idx; i++) { 6459 ASSERT(ppa[i] != NULL); 6460 page_unlock(ppa[i]); 6461 } 6462 6463 kmem_free(ppa, ppasize); 6464 return (err); 6465 } 6466 6467 /* 6468 * Returns right (upper address) segment if split occurred. 6469 * If the address is equal to the beginning or end of its segment it returns 6470 * the current segment. 6471 */ 6472 static struct seg * 6473 segvn_split_seg(struct seg *seg, caddr_t addr) 6474 { 6475 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6476 struct seg *nseg; 6477 size_t nsize; 6478 struct segvn_data *nsvd; 6479 6480 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6481 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6482 6483 ASSERT(addr >= seg->s_base); 6484 ASSERT(addr <= seg->s_base + seg->s_size); 6485 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6486 6487 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6488 return (seg); 6489 6490 nsize = seg->s_base + seg->s_size - addr; 6491 seg->s_size = addr - seg->s_base; 6492 nseg = seg_alloc(seg->s_as, addr, nsize); 6493 ASSERT(nseg != NULL); 6494 nseg->s_ops = seg->s_ops; 6495 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6496 nseg->s_data = (void *)nsvd; 6497 nseg->s_szc = seg->s_szc; 6498 *nsvd = *svd; 6499 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6500 nsvd->seg = nseg; 6501 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6502 6503 if (nsvd->vp != NULL) { 6504 VN_HOLD(nsvd->vp); 6505 nsvd->offset = svd->offset + 6506 (uintptr_t)(nseg->s_base - seg->s_base); 6507 if (nsvd->type == MAP_SHARED) 6508 lgrp_shm_policy_init(NULL, nsvd->vp); 6509 } else { 6510 /* 6511 * The offset for an anonymous segment has no signifigance in 6512 * terms of an offset into a file. 
If we were to use the above 6513 * calculation instead, the structures read out of 6514 * /proc/<pid>/xmap would be more difficult to decipher since 6515 * it would be unclear whether two seemingly contiguous 6516 * prxmap_t structures represented different segments or a 6517 * single segment that had been split up into multiple prxmap_t 6518 * structures (e.g. if some part of the segment had not yet 6519 * been faulted in). 6520 */ 6521 nsvd->offset = 0; 6522 } 6523 6524 ASSERT(svd->softlockcnt == 0); 6525 ASSERT(svd->softlockcnt_sbase == 0); 6526 ASSERT(svd->softlockcnt_send == 0); 6527 crhold(svd->cred); 6528 6529 if (svd->vpage != NULL) { 6530 size_t bytes = vpgtob(seg_pages(seg)); 6531 size_t nbytes = vpgtob(seg_pages(nseg)); 6532 struct vpage *ovpage = svd->vpage; 6533 6534 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6535 bcopy(ovpage, svd->vpage, bytes); 6536 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6537 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6538 kmem_free(ovpage, bytes + nbytes); 6539 } 6540 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6541 struct anon_map *oamp = svd->amp, *namp; 6542 struct anon_hdr *nahp; 6543 6544 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6545 ASSERT(oamp->refcnt == 1); 6546 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6547 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6548 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6549 6550 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6551 namp->a_szc = nseg->s_szc; 6552 (void) anon_copy_ptr(oamp->ahp, 6553 svd->anon_index + btop(seg->s_size), 6554 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6555 anon_release(oamp->ahp, btop(oamp->size)); 6556 oamp->ahp = nahp; 6557 oamp->size = seg->s_size; 6558 svd->anon_index = 0; 6559 nsvd->amp = namp; 6560 nsvd->anon_index = 0; 6561 ANON_LOCK_EXIT(&oamp->a_rwlock); 6562 } else if (svd->amp != NULL) { 6563 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6564 ASSERT(svd->amp == nsvd->amp); 6565 ASSERT(seg->s_szc <= svd->amp->a_szc); 6566 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6567 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6568 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6569 svd->amp->refcnt++; 6570 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6571 } 6572 6573 /* 6574 * Split the amount of swap reserved. 6575 */ 6576 if (svd->swresv) { 6577 /* 6578 * For MAP_NORESERVE, only allocate swap reserve for pages 6579 * being used. Other segments get enough to cover whole 6580 * segment. 6581 */ 6582 if (svd->flags & MAP_NORESERVE) { 6583 size_t oswresv; 6584 6585 ASSERT(svd->amp); 6586 oswresv = svd->swresv; 6587 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6588 svd->anon_index, btop(seg->s_size))); 6589 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6590 nsvd->anon_index, btop(nseg->s_size))); 6591 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6592 } else { 6593 if (svd->pageswap) { 6594 svd->swresv = segvn_count_swap_by_vpages(seg); 6595 ASSERT(nsvd->swresv >= svd->swresv); 6596 nsvd->swresv -= svd->swresv; 6597 } else { 6598 ASSERT(svd->swresv == seg->s_size + 6599 nseg->s_size); 6600 svd->swresv = seg->s_size; 6601 nsvd->swresv = nseg->s_size; 6602 } 6603 } 6604 } 6605 6606 return (nseg); 6607 } 6608 6609 /* 6610 * called on memory operations (unmap, setprot, setpagesize) for a subset 6611 * of a large page segment to either demote the memory range (SDR_RANGE) 6612 * or the ends (SDR_END) by addr/len. 6613 * 6614 * returns 0 on success. returns errno, including ENOMEM, on failure. 
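 * SDR_RANGE demotes every large page overlapping [addr, addr + len)
 * and is used when the whole range must drop to PAGESIZE (e.g. the
 * szc-lowering path of segvn_setpagesize()).  SDR_END demotes only
 * the large page(s) straddling the ends of the range, which is
 * sufficient for callers such as segvn_setprot() that operate on a
 * pgsz-unaligned sub-range, e.g.
 *
 *	err = segvn_demote_range(seg, addr, len, SDR_END, 0);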
6615 */ 6616 static int 6617 segvn_demote_range( 6618 struct seg *seg, 6619 caddr_t addr, 6620 size_t len, 6621 int flag, 6622 uint_t szcvec) 6623 { 6624 caddr_t eaddr = addr + len; 6625 caddr_t lpgaddr, lpgeaddr; 6626 struct seg *nseg; 6627 struct seg *badseg1 = NULL; 6628 struct seg *badseg2 = NULL; 6629 size_t pgsz; 6630 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6631 int err; 6632 uint_t szc = seg->s_szc; 6633 uint_t tszcvec; 6634 6635 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6636 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6637 ASSERT(szc != 0); 6638 pgsz = page_get_pagesize(szc); 6639 ASSERT(seg->s_base != addr || seg->s_size != len); 6640 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6641 ASSERT(svd->softlockcnt == 0); 6642 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6643 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6644 6645 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6646 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6647 if (flag == SDR_RANGE) { 6648 /* demote entire range */ 6649 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6650 (void) segvn_split_seg(nseg, lpgeaddr); 6651 ASSERT(badseg1->s_base == lpgaddr); 6652 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6653 } else if (addr != lpgaddr) { 6654 ASSERT(flag == SDR_END); 6655 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6656 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6657 eaddr < lpgaddr + 2 * pgsz) { 6658 (void) segvn_split_seg(nseg, lpgeaddr); 6659 ASSERT(badseg1->s_base == lpgaddr); 6660 ASSERT(badseg1->s_size == 2 * pgsz); 6661 } else { 6662 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6663 ASSERT(badseg1->s_base == lpgaddr); 6664 ASSERT(badseg1->s_size == pgsz); 6665 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6666 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6667 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6668 badseg2 = nseg; 6669 (void) segvn_split_seg(nseg, lpgeaddr); 6670 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6671 ASSERT(badseg2->s_size == pgsz); 6672 } 6673 } 6674 } else { 6675 ASSERT(flag == SDR_END); 6676 ASSERT(eaddr < lpgeaddr); 6677 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6678 (void) segvn_split_seg(nseg, lpgeaddr); 6679 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6680 ASSERT(badseg1->s_size == pgsz); 6681 } 6682 6683 ASSERT(badseg1 != NULL); 6684 ASSERT(badseg1->s_szc == szc); 6685 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6686 badseg1->s_size == 2 * pgsz); 6687 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6688 ASSERT(badseg1->s_size == pgsz || 6689 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6690 if (err = segvn_clrszc(badseg1)) { 6691 return (err); 6692 } 6693 ASSERT(badseg1->s_szc == 0); 6694 6695 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6696 uint_t tszc = highbit(tszcvec) - 1; 6697 caddr_t ta = MAX(addr, badseg1->s_base); 6698 caddr_t te; 6699 size_t tpgsz = page_get_pagesize(tszc); 6700 6701 ASSERT(svd->type == MAP_SHARED); 6702 ASSERT(flag == SDR_END); 6703 ASSERT(tszc < szc && tszc > 0); 6704 6705 if (eaddr > badseg1->s_base + badseg1->s_size) { 6706 te = badseg1->s_base + badseg1->s_size; 6707 } else { 6708 te = eaddr; 6709 } 6710 6711 ASSERT(ta <= te); 6712 badseg1->s_szc = tszc; 6713 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6714 if (badseg2 != NULL) { 6715 err = segvn_demote_range(badseg1, ta, te - ta, 6716 SDR_END, tszcvec); 6717 if (err != 0) { 6718 return (err); 6719 } 6720 } else { 6721 
return (segvn_demote_range(badseg1, ta, 6722 te - ta, SDR_END, tszcvec)); 6723 } 6724 } 6725 } 6726 6727 if (badseg2 == NULL) 6728 return (0); 6729 ASSERT(badseg2->s_szc == szc); 6730 ASSERT(badseg2->s_size == pgsz); 6731 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6732 if (err = segvn_clrszc(badseg2)) { 6733 return (err); 6734 } 6735 ASSERT(badseg2->s_szc == 0); 6736 6737 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6738 uint_t tszc = highbit(tszcvec) - 1; 6739 size_t tpgsz = page_get_pagesize(tszc); 6740 6741 ASSERT(svd->type == MAP_SHARED); 6742 ASSERT(flag == SDR_END); 6743 ASSERT(tszc < szc && tszc > 0); 6744 ASSERT(badseg2->s_base > addr); 6745 ASSERT(eaddr > badseg2->s_base); 6746 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6747 6748 badseg2->s_szc = tszc; 6749 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6750 return (segvn_demote_range(badseg2, badseg2->s_base, 6751 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6752 } 6753 } 6754 6755 return (0); 6756 } 6757 6758 static int 6759 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6760 { 6761 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6762 struct vpage *vp, *evp; 6763 6764 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6765 6766 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6767 /* 6768 * If segment protection can be used, simply check against them. 6769 */ 6770 if (svd->pageprot == 0) { 6771 int err; 6772 6773 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6774 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6775 return (err); 6776 } 6777 6778 /* 6779 * Have to check down to the vpage level. 6780 */ 6781 evp = &svd->vpage[seg_page(seg, addr + len)]; 6782 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6783 if ((VPP_PROT(vp) & prot) != prot) { 6784 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6785 return (EACCES); 6786 } 6787 } 6788 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6789 return (0); 6790 } 6791 6792 static int 6793 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6794 { 6795 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6796 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6797 6798 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6799 6800 if (pgno != 0) { 6801 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6802 if (svd->pageprot == 0) { 6803 do { 6804 protv[--pgno] = svd->prot; 6805 } while (pgno != 0); 6806 } else { 6807 size_t pgoff = seg_page(seg, addr); 6808 6809 do { 6810 pgno--; 6811 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6812 } while (pgno != 0); 6813 } 6814 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6815 } 6816 return (0); 6817 } 6818 6819 static u_offset_t 6820 segvn_getoffset(struct seg *seg, caddr_t addr) 6821 { 6822 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6823 6824 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6825 6826 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6827 } 6828 6829 /*ARGSUSED*/ 6830 static int 6831 segvn_gettype(struct seg *seg, caddr_t addr) 6832 { 6833 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6834 6835 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6836 6837 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6838 MAP_INITDATA))); 6839 } 6840 6841 /*ARGSUSED*/ 6842 static int 6843 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6844 { 6845 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6846 6847 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6848 6849 *vpp = svd->vp; 6850 return (0); 6851 } 6852 6853 /* 6854 * Check to see if it makes sense to do kluster/read ahead to 6855 * addr + delta relative to the mapping at addr. We assume here 6856 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6857 * 6858 * For segvn, we currently "approve" of the action if we are 6859 * still in the segment and it maps from the same vp/off, 6860 * or if the advice stored in segvn_data or vpages allows it. 6861 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6862 */ 6863 static int 6864 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6865 { 6866 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6867 struct anon *oap, *ap; 6868 ssize_t pd; 6869 size_t page; 6870 struct vnode *vp1, *vp2; 6871 u_offset_t off1, off2; 6872 struct anon_map *amp; 6873 6874 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6875 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6876 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6877 6878 if (addr + delta < seg->s_base || 6879 addr + delta >= (seg->s_base + seg->s_size)) 6880 return (-1); /* exceeded segment bounds */ 6881 6882 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6883 page = seg_page(seg, addr); 6884 6885 /* 6886 * Check to see if either of the pages addr or addr + delta 6887 * have advice set that prevents klustering (if MADV_RANDOM advice 6888 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6889 * is negative). 6890 */ 6891 if (svd->advice == MADV_RANDOM || 6892 svd->advice == MADV_SEQUENTIAL && delta < 0) 6893 return (-1); 6894 else if (svd->pageadvice && svd->vpage) { 6895 struct vpage *bvpp, *evpp; 6896 6897 bvpp = &svd->vpage[page]; 6898 evpp = &svd->vpage[page + pd]; 6899 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6900 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6901 return (-1); 6902 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6903 VPP_ADVICE(evpp) == MADV_RANDOM) 6904 return (-1); 6905 } 6906 6907 if (svd->type == MAP_SHARED) 6908 return (0); /* shared mapping - all ok */ 6909 6910 if ((amp = svd->amp) == NULL) 6911 return (0); /* off original vnode */ 6912 6913 page += svd->anon_index; 6914 6915 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6916 6917 oap = anon_get_ptr(amp->ahp, page); 6918 ap = anon_get_ptr(amp->ahp, page + pd); 6919 6920 ANON_LOCK_EXIT(&->a_rwlock); 6921 6922 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 6923 return (-1); /* one with and one without an anon */ 6924 } 6925 6926 if (oap == NULL) { /* implies that ap == NULL */ 6927 return (0); /* off original vnode */ 6928 } 6929 6930 /* 6931 * Now we know we have two anon pointers - check to 6932 * see if they happen to be properly allocated. 6933 */ 6934 6935 /* 6936 * XXX We cheat here and don't lock the anon slots. We can't because 6937 * we may have been called from the anon layer which might already 6938 * have locked them. We are holding a refcnt on the slots so they 6939 * can't disappear. The worst that will happen is we'll get the wrong 6940 * names (vp, off) for the slots and make a poor klustering decision. 6941 */ 6942 swap_xlate(ap, &vp1, &off1); 6943 swap_xlate(oap, &vp2, &off2); 6944 6945 6946 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 6947 return (-1); 6948 return (0); 6949 } 6950 6951 /* 6952 * Swap the pages of seg out to secondary storage, returning the 6953 * number of bytes of storage freed. 
6954 * 6955 * The basic idea is first to unload all translations and then to call 6956 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 6957 * swap device. Pages to which other segments have mappings will remain 6958 * mapped and won't be swapped. Our caller (as_swapout) has already 6959 * performed the unloading step. 6960 * 6961 * The value returned is intended to correlate well with the process's 6962 * memory requirements. However, there are some caveats: 6963 * 1) When given a shared segment as argument, this routine will 6964 * only succeed in swapping out pages for the last sharer of the 6965 * segment. (Previous callers will only have decremented mapping 6966 * reference counts.) 6967 * 2) We assume that the hat layer maintains a large enough translation 6968 * cache to capture process reference patterns. 6969 */ 6970 static size_t 6971 segvn_swapout(struct seg *seg) 6972 { 6973 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6974 struct anon_map *amp; 6975 pgcnt_t pgcnt = 0; 6976 pgcnt_t npages; 6977 pgcnt_t page; 6978 ulong_t anon_index; 6979 6980 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6981 6982 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6983 /* 6984 * Find pages unmapped by our caller and force them 6985 * out to the virtual swap device. 6986 */ 6987 if ((amp = svd->amp) != NULL) 6988 anon_index = svd->anon_index; 6989 npages = seg->s_size >> PAGESHIFT; 6990 for (page = 0; page < npages; page++) { 6991 page_t *pp; 6992 struct anon *ap; 6993 struct vnode *vp; 6994 u_offset_t off; 6995 anon_sync_obj_t cookie; 6996 6997 /* 6998 * Obtain <vp, off> pair for the page, then look it up. 6999 * 7000 * Note that this code is willing to consider regular 7001 * pages as well as anon pages. Is this appropriate here? 7002 */ 7003 ap = NULL; 7004 if (amp != NULL) { 7005 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7006 if (anon_array_try_enter(amp, anon_index + page, 7007 &cookie)) { 7008 ANON_LOCK_EXIT(&->a_rwlock); 7009 continue; 7010 } 7011 ap = anon_get_ptr(amp->ahp, anon_index + page); 7012 if (ap != NULL) { 7013 swap_xlate(ap, &vp, &off); 7014 } else { 7015 vp = svd->vp; 7016 off = svd->offset + ptob(page); 7017 } 7018 anon_array_exit(&cookie); 7019 ANON_LOCK_EXIT(&->a_rwlock); 7020 } else { 7021 vp = svd->vp; 7022 off = svd->offset + ptob(page); 7023 } 7024 if (vp == NULL) { /* untouched zfod page */ 7025 ASSERT(ap == NULL); 7026 continue; 7027 } 7028 7029 pp = page_lookup_nowait(vp, off, SE_SHARED); 7030 if (pp == NULL) 7031 continue; 7032 7033 7034 /* 7035 * Examine the page to see whether it can be tossed out, 7036 * keeping track of how many we've found. 7037 */ 7038 if (!page_tryupgrade(pp)) { 7039 /* 7040 * If the page has an i/o lock and no mappings, 7041 * it's very likely that the page is being 7042 * written out as a result of klustering. 7043 * Assume this is so and take credit for it here. 7044 */ 7045 if (!page_io_trylock(pp)) { 7046 if (!hat_page_is_mapped(pp)) 7047 pgcnt++; 7048 } else { 7049 page_io_unlock(pp); 7050 } 7051 page_unlock(pp); 7052 continue; 7053 } 7054 ASSERT(!page_iolock_assert(pp)); 7055 7056 7057 /* 7058 * Skip if page is locked or has mappings. 7059 * We don't need the page_struct_lock to look at lckcnt 7060 * and cowcnt because the page is exclusive locked. 7061 */ 7062 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7063 hat_page_is_mapped(pp)) { 7064 page_unlock(pp); 7065 continue; 7066 } 7067 7068 /* 7069 * dispose skips large pages so try to demote first. 
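 * (VN_DISPOSE() can only free PAGESIZE pages, so a large page must
 * be demoted before it can be tossed; if the demotion attempt below
 * fails the page is simply skipped.)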
7070 */ 7071 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7072 page_unlock(pp); 7073 /* 7074 * XXX should skip the remaining page_t's of this 7075 * large page. 7076 */ 7077 continue; 7078 } 7079 7080 ASSERT(pp->p_szc == 0); 7081 7082 /* 7083 * No longer mapped -- we can toss it out. How 7084 * we do so depends on whether or not it's dirty. 7085 */ 7086 if (hat_ismod(pp) && pp->p_vnode) { 7087 /* 7088 * We must clean the page before it can be 7089 * freed. Setting B_FREE will cause pvn_done 7090 * to free the page when the i/o completes. 7091 * XXX: This also causes it to be accounted 7092 * as a pageout instead of a swap: need 7093 * B_SWAPOUT bit to use instead of B_FREE. 7094 * 7095 * Hold the vnode before releasing the page lock 7096 * to prevent it from being freed and re-used by 7097 * some other thread. 7098 */ 7099 VN_HOLD(vp); 7100 page_unlock(pp); 7101 7102 /* 7103 * Queue all i/o requests for the pageout thread 7104 * to avoid saturating the pageout devices. 7105 */ 7106 if (!queue_io_request(vp, off)) 7107 VN_RELE(vp); 7108 } else { 7109 /* 7110 * The page was clean, free it. 7111 * 7112 * XXX: Can we ever encounter modified pages 7113 * with no associated vnode here? 7114 */ 7115 ASSERT(pp->p_vnode != NULL); 7116 /*LINTED: constant in conditional context*/ 7117 VN_DISPOSE(pp, B_FREE, 0, kcred); 7118 } 7119 7120 /* 7121 * Credit now even if i/o is in progress. 7122 */ 7123 pgcnt++; 7124 } 7125 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7126 7127 /* 7128 * Wakeup pageout to initiate i/o on all queued requests. 7129 */ 7130 cv_signal_pageout(); 7131 return (ptob(pgcnt)); 7132 } 7133 7134 /* 7135 * Synchronize primary storage cache with real object in virtual memory. 7136 * 7137 * XXX - Anonymous pages should not be sync'ed out at all. 7138 */ 7139 static int 7140 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7141 { 7142 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7143 struct vpage *vpp; 7144 page_t *pp; 7145 u_offset_t offset; 7146 struct vnode *vp; 7147 u_offset_t off; 7148 caddr_t eaddr; 7149 int bflags; 7150 int err = 0; 7151 int segtype; 7152 int pageprot; 7153 int prot; 7154 ulong_t anon_index; 7155 struct anon_map *amp; 7156 struct anon *ap; 7157 anon_sync_obj_t cookie; 7158 7159 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7160 7161 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7162 7163 if (svd->softlockcnt > 0) { 7164 /* 7165 * If this is shared segment non 0 softlockcnt 7166 * means locked pages are still in use. 7167 */ 7168 if (svd->type == MAP_SHARED) { 7169 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7170 return (EAGAIN); 7171 } 7172 7173 /* 7174 * flush all pages from seg cache 7175 * otherwise we may deadlock in swap_putpage 7176 * for B_INVAL page (4175402). 7177 * 7178 * Even if we grab segvn WRITER's lock 7179 * here, there might be another thread which could've 7180 * successfully performed lookup/insert just before 7181 * we acquired the lock here. So, grabbing either 7182 * lock here is of not much use. Until we devise 7183 * a strategy at upper layers to solve the 7184 * synchronization issues completely, we expect 7185 * applications to handle this appropriately. 7186 */ 7187 segvn_purge(seg); 7188 if (svd->softlockcnt > 0) { 7189 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7190 return (EAGAIN); 7191 } 7192 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7193 svd->amp->a_softlockcnt > 0) { 7194 /* 7195 * Try to purge this amp's entries from pcache. 
It will 7196 * succeed only if other segments that share the amp have no 7197 * outstanding softlock's. 7198 */ 7199 segvn_purge(seg); 7200 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7201 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7202 return (EAGAIN); 7203 } 7204 } 7205 7206 vpp = svd->vpage; 7207 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7208 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7209 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7210 7211 if (attr) { 7212 pageprot = attr & ~(SHARED|PRIVATE); 7213 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7214 7215 /* 7216 * We are done if the segment types don't match 7217 * or if we have segment level protections and 7218 * they don't match. 7219 */ 7220 if (svd->type != segtype) { 7221 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7222 return (0); 7223 } 7224 if (vpp == NULL) { 7225 if (svd->prot != pageprot) { 7226 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7227 return (0); 7228 } 7229 prot = svd->prot; 7230 } else 7231 vpp = &svd->vpage[seg_page(seg, addr)]; 7232 7233 } else if (svd->vp && svd->amp == NULL && 7234 (flags & MS_INVALIDATE) == 0) { 7235 7236 /* 7237 * No attributes, no anonymous pages and MS_INVALIDATE flag 7238 * is not on, just use one big request. 7239 */ 7240 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7241 bflags, svd->cred, NULL); 7242 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7243 return (err); 7244 } 7245 7246 if ((amp = svd->amp) != NULL) 7247 anon_index = svd->anon_index + seg_page(seg, addr); 7248 7249 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7250 ap = NULL; 7251 if (amp != NULL) { 7252 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7253 anon_array_enter(amp, anon_index, &cookie); 7254 ap = anon_get_ptr(amp->ahp, anon_index++); 7255 if (ap != NULL) { 7256 swap_xlate(ap, &vp, &off); 7257 } else { 7258 vp = svd->vp; 7259 off = offset; 7260 } 7261 anon_array_exit(&cookie); 7262 ANON_LOCK_EXIT(&->a_rwlock); 7263 } else { 7264 vp = svd->vp; 7265 off = offset; 7266 } 7267 offset += PAGESIZE; 7268 7269 if (vp == NULL) /* untouched zfod page */ 7270 continue; 7271 7272 if (attr) { 7273 if (vpp) { 7274 prot = VPP_PROT(vpp); 7275 vpp++; 7276 } 7277 if (prot != pageprot) { 7278 continue; 7279 } 7280 } 7281 7282 /* 7283 * See if any of these pages are locked -- if so, then we 7284 * will have to truncate an invalidate request at the first 7285 * locked one. We don't need the page_struct_lock to test 7286 * as this is only advisory; even if we acquire it someone 7287 * might race in and lock the page after we unlock and before 7288 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7289 */ 7290 if (flags & MS_INVALIDATE) { 7291 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7292 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7293 page_unlock(pp); 7294 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7295 return (EBUSY); 7296 } 7297 if (ap != NULL && pp->p_szc != 0 && 7298 page_tryupgrade(pp)) { 7299 if (pp->p_lckcnt == 0 && 7300 pp->p_cowcnt == 0) { 7301 /* 7302 * swapfs VN_DISPOSE() won't 7303 * invalidate large pages. 7304 * Attempt to demote. 7305 * XXX can't help it if it 7306 * fails. But for swapfs 7307 * pages it is no big deal. 7308 */ 7309 (void) page_try_demote_pages( 7310 pp); 7311 } 7312 } 7313 page_unlock(pp); 7314 } 7315 } else if (svd->type == MAP_SHARED && amp != NULL) { 7316 /* 7317 * Avoid writing out to disk ISM's large pages 7318 * because segspt_free_pages() relies on NULL an_pvp 7319 * of anon slots of such pages. 
7320 */ 7321 7322 ASSERT(svd->vp == NULL); 7323 /* 7324 * swapfs uses page_lookup_nowait if not freeing or 7325 * invalidating and skips a page if 7326 * page_lookup_nowait returns NULL. 7327 */ 7328 pp = page_lookup_nowait(vp, off, SE_SHARED); 7329 if (pp == NULL) { 7330 continue; 7331 } 7332 if (pp->p_szc != 0) { 7333 page_unlock(pp); 7334 continue; 7335 } 7336 7337 /* 7338 * Note ISM pages are created large so (vp, off)'s 7339 * page cannot suddenly become large after we unlock 7340 * pp. 7341 */ 7342 page_unlock(pp); 7343 } 7344 /* 7345 * XXX - Should ultimately try to kluster 7346 * calls to VOP_PUTPAGE() for performance. 7347 */ 7348 VN_HOLD(vp); 7349 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7350 bflags, svd->cred, NULL); 7351 VN_RELE(vp); 7352 if (err) 7353 break; 7354 } 7355 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7356 return (err); 7357 } 7358 7359 /* 7360 * Determine if we have data corresponding to pages in the 7361 * primary storage virtual memory cache (i.e., "in core"). 7362 */ 7363 static size_t 7364 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7365 { 7366 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7367 struct vnode *vp, *avp; 7368 u_offset_t offset, aoffset; 7369 size_t p, ep; 7370 int ret; 7371 struct vpage *vpp; 7372 page_t *pp; 7373 uint_t start; 7374 struct anon_map *amp; /* XXX - for locknest */ 7375 struct anon *ap; 7376 uint_t attr; 7377 anon_sync_obj_t cookie; 7378 7379 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7380 7381 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7382 if (svd->amp == NULL && svd->vp == NULL) { 7383 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7384 bzero(vec, btopr(len)); 7385 return (len); /* no anonymous pages created yet */ 7386 } 7387 7388 p = seg_page(seg, addr); 7389 ep = seg_page(seg, addr + len); 7390 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7391 7392 amp = svd->amp; 7393 for (; p < ep; p++, addr += PAGESIZE) { 7394 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7395 ret = start; 7396 ap = NULL; 7397 avp = NULL; 7398 /* Grab the vnode/offset for the anon slot */ 7399 if (amp != NULL) { 7400 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7401 anon_array_enter(amp, svd->anon_index + p, &cookie); 7402 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7403 if (ap != NULL) { 7404 swap_xlate(ap, &avp, &aoffset); 7405 } 7406 anon_array_exit(&cookie); 7407 ANON_LOCK_EXIT(&->a_rwlock); 7408 } 7409 if ((avp != NULL) && page_exists(avp, aoffset)) { 7410 /* A page exists for the anon slot */ 7411 ret |= SEG_PAGE_INCORE; 7412 7413 /* 7414 * If page is mapped and writable 7415 */ 7416 attr = (uint_t)0; 7417 if ((hat_getattr(seg->s_as->a_hat, addr, 7418 &attr) != -1) && (attr & PROT_WRITE)) { 7419 ret |= SEG_PAGE_ANON; 7420 } 7421 /* 7422 * Don't get page_struct lock for lckcnt and cowcnt, 7423 * since this is purely advisory. 7424 */ 7425 if ((pp = page_lookup_nowait(avp, aoffset, 7426 SE_SHARED)) != NULL) { 7427 if (pp->p_lckcnt) 7428 ret |= SEG_PAGE_SOFTLOCK; 7429 if (pp->p_cowcnt) 7430 ret |= SEG_PAGE_HASCOW; 7431 page_unlock(pp); 7432 } 7433 } 7434 7435 /* Gather vnode statistics */ 7436 vp = svd->vp; 7437 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7438 7439 if (vp != NULL) { 7440 /* 7441 * Try to obtain a "shared" lock on the page 7442 * without blocking. If this fails, determine 7443 * if the page is in memory. 
7444 */ 7445 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7446 if ((pp == NULL) && (page_exists(vp, offset))) { 7447 /* Page is incore, and is named */ 7448 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7449 } 7450 /* 7451 * Don't get page_struct lock for lckcnt and cowcnt, 7452 * since this is purely advisory. 7453 */ 7454 if (pp != NULL) { 7455 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7456 if (pp->p_lckcnt) 7457 ret |= SEG_PAGE_SOFTLOCK; 7458 if (pp->p_cowcnt) 7459 ret |= SEG_PAGE_HASCOW; 7460 page_unlock(pp); 7461 } 7462 } 7463 7464 /* Gather virtual page information */ 7465 if (vpp) { 7466 if (VPP_ISPPLOCK(vpp)) 7467 ret |= SEG_PAGE_LOCKED; 7468 vpp++; 7469 } 7470 7471 *vec++ = (char)ret; 7472 } 7473 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7474 return (len); 7475 } 7476 7477 /* 7478 * Statement for p_cowcnts/p_lckcnts. 7479 * 7480 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7481 * irrespective of the following factors or anything else: 7482 * 7483 * (1) anon slots are populated or not 7484 * (2) cow is broken or not 7485 * (3) refcnt on ap is 1 or greater than 1 7486 * 7487 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7488 * and munlock. 7489 * 7490 * 7491 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7492 * 7493 * if vpage has PROT_WRITE 7494 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7495 * else 7496 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7497 * 7498 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7499 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7500 * 7501 * We may also break COW if softlocking on read access in the physio case. 7502 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7503 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7504 * vpage doesn't have PROT_WRITE. 7505 * 7506 * 7507 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7508 * 7509 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7510 * increment p_lckcnt by calling page_subclaim() which takes care of 7511 * availrmem accounting and p_lckcnt overflow. 7512 * 7513 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7514 * increment p_cowcnt by calling page_addclaim() which takes care of 7515 * availrmem availability and p_cowcnt overflow. 7516 */ 7517 7518 /* 7519 * Lock down (or unlock) pages mapped by this segment. 7520 * 7521 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7522 * At fault time they will be relocated into larger pages. 
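 * op is MC_LOCK or MC_UNLOCK (i.e. mlock(3C)/munlock(3C) or
 * memcntl(2) on this range).  Locked bytes are charged against the
 * locked-memory rctl of the calling process or, for a SysV shared
 * memory segment, of the owning project; any bytes that fail to
 * lock are credited back before returning.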
7523 */ 7524 static int 7525 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7526 int attr, int op, ulong_t *lockmap, size_t pos) 7527 { 7528 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7529 struct vpage *vpp; 7530 struct vpage *evp; 7531 page_t *pp; 7532 u_offset_t offset; 7533 u_offset_t off; 7534 int segtype; 7535 int pageprot; 7536 int claim; 7537 struct vnode *vp; 7538 ulong_t anon_index; 7539 struct anon_map *amp; 7540 struct anon *ap; 7541 struct vattr va; 7542 anon_sync_obj_t cookie; 7543 struct kshmid *sp = NULL; 7544 struct proc *p = curproc; 7545 kproject_t *proj = NULL; 7546 int chargeproc = 1; 7547 size_t locked_bytes = 0; 7548 size_t unlocked_bytes = 0; 7549 int err = 0; 7550 7551 /* 7552 * Hold write lock on address space because may split or concatenate 7553 * segments 7554 */ 7555 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7556 7557 /* 7558 * If this is a shm, use shm's project and zone, else use 7559 * project and zone of calling process 7560 */ 7561 7562 /* Determine if this segment backs a sysV shm */ 7563 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7564 ASSERT(svd->type == MAP_SHARED); 7565 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7566 sp = svd->amp->a_sp; 7567 proj = sp->shm_perm.ipc_proj; 7568 chargeproc = 0; 7569 } 7570 7571 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7572 if (attr) { 7573 pageprot = attr & ~(SHARED|PRIVATE); 7574 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7575 7576 /* 7577 * We are done if the segment types don't match 7578 * or if we have segment level protections and 7579 * they don't match. 7580 */ 7581 if (svd->type != segtype) { 7582 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7583 return (0); 7584 } 7585 if (svd->pageprot == 0 && svd->prot != pageprot) { 7586 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7587 return (0); 7588 } 7589 } 7590 7591 if (op == MC_LOCK) { 7592 if (svd->tr_state == SEGVN_TR_INIT) { 7593 svd->tr_state = SEGVN_TR_OFF; 7594 } else if (svd->tr_state == SEGVN_TR_ON) { 7595 ASSERT(svd->amp != NULL); 7596 segvn_textunrepl(seg, 0); 7597 ASSERT(svd->amp == NULL && 7598 svd->tr_state == SEGVN_TR_OFF); 7599 } 7600 } 7601 7602 /* 7603 * If we're locking, then we must create a vpage structure if 7604 * none exists. If we're unlocking, then check to see if there 7605 * is a vpage -- if not, then we could not have locked anything. 7606 */ 7607 7608 if ((vpp = svd->vpage) == NULL) { 7609 if (op == MC_LOCK) 7610 segvn_vpage(seg); 7611 else { 7612 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7613 return (0); 7614 } 7615 } 7616 7617 /* 7618 * The anonymous data vector (i.e., previously 7619 * unreferenced mapping to swap space) can be allocated 7620 * by lazily testing for its existence. 
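 * i.e. an MC_LOCK of a not-yet-faulted zfod mapping (no vnode and
 * no anon_map yet) allocates the anon_map here so that the pages
 * can be created and locked below.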
7621 */ 7622 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7623 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7624 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7625 svd->amp->a_szc = seg->s_szc; 7626 } 7627 7628 if ((amp = svd->amp) != NULL) { 7629 anon_index = svd->anon_index + seg_page(seg, addr); 7630 } 7631 7632 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7633 evp = &svd->vpage[seg_page(seg, addr + len)]; 7634 7635 if (sp != NULL) 7636 mutex_enter(&sp->shm_mlock); 7637 7638 /* determine number of unlocked bytes in range for lock operation */ 7639 if (op == MC_LOCK) { 7640 7641 if (sp == NULL) { 7642 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7643 vpp++) { 7644 if (!VPP_ISPPLOCK(vpp)) 7645 unlocked_bytes += PAGESIZE; 7646 } 7647 } else { 7648 ulong_t i_idx, i_edx; 7649 anon_sync_obj_t i_cookie; 7650 struct anon *i_ap; 7651 struct vnode *i_vp; 7652 u_offset_t i_off; 7653 7654 /* Only count sysV pages once for locked memory */ 7655 i_edx = svd->anon_index + seg_page(seg, addr + len); 7656 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7657 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7658 anon_array_enter(amp, i_idx, &i_cookie); 7659 i_ap = anon_get_ptr(amp->ahp, i_idx); 7660 if (i_ap == NULL) { 7661 unlocked_bytes += PAGESIZE; 7662 anon_array_exit(&i_cookie); 7663 continue; 7664 } 7665 swap_xlate(i_ap, &i_vp, &i_off); 7666 anon_array_exit(&i_cookie); 7667 pp = page_lookup(i_vp, i_off, SE_SHARED); 7668 if (pp == NULL) { 7669 unlocked_bytes += PAGESIZE; 7670 continue; 7671 } else if (pp->p_lckcnt == 0) 7672 unlocked_bytes += PAGESIZE; 7673 page_unlock(pp); 7674 } 7675 ANON_LOCK_EXIT(&->a_rwlock); 7676 } 7677 7678 mutex_enter(&p->p_lock); 7679 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7680 chargeproc); 7681 mutex_exit(&p->p_lock); 7682 7683 if (err) { 7684 if (sp != NULL) 7685 mutex_exit(&sp->shm_mlock); 7686 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7687 return (err); 7688 } 7689 } 7690 /* 7691 * Loop over all pages in the range. Process if we're locking and 7692 * page has not already been locked in this mapping; or if we're 7693 * unlocking and the page has been locked. 7694 */ 7695 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7696 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7697 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7698 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7699 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7700 7701 if (amp != NULL) 7702 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7703 /* 7704 * If this isn't a MAP_NORESERVE segment and 7705 * we're locking, allocate anon slots if they 7706 * don't exist. The page is brought in later on. 7707 */ 7708 if (op == MC_LOCK && svd->vp == NULL && 7709 ((svd->flags & MAP_NORESERVE) == 0) && 7710 amp != NULL && 7711 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7712 == NULL)) { 7713 anon_array_enter(amp, anon_index, &cookie); 7714 7715 if ((ap = anon_get_ptr(amp->ahp, 7716 anon_index)) == NULL) { 7717 pp = anon_zero(seg, addr, &ap, 7718 svd->cred); 7719 if (pp == NULL) { 7720 anon_array_exit(&cookie); 7721 ANON_LOCK_EXIT(&->a_rwlock); 7722 err = ENOMEM; 7723 goto out; 7724 } 7725 ASSERT(anon_get_ptr(amp->ahp, 7726 anon_index) == NULL); 7727 (void) anon_set_ptr(amp->ahp, 7728 anon_index, ap, ANON_SLEEP); 7729 page_unlock(pp); 7730 } 7731 anon_array_exit(&cookie); 7732 } 7733 7734 /* 7735 * Get name for page, accounting for 7736 * existence of private copy. 
7737 */ 7738 ap = NULL; 7739 if (amp != NULL) { 7740 anon_array_enter(amp, anon_index, &cookie); 7741 ap = anon_get_ptr(amp->ahp, anon_index); 7742 if (ap != NULL) { 7743 swap_xlate(ap, &vp, &off); 7744 } else { 7745 if (svd->vp == NULL && 7746 (svd->flags & MAP_NORESERVE)) { 7747 anon_array_exit(&cookie); 7748 ANON_LOCK_EXIT(&->a_rwlock); 7749 continue; 7750 } 7751 vp = svd->vp; 7752 off = offset; 7753 } 7754 if (op != MC_LOCK || ap == NULL) { 7755 anon_array_exit(&cookie); 7756 ANON_LOCK_EXIT(&->a_rwlock); 7757 } 7758 } else { 7759 vp = svd->vp; 7760 off = offset; 7761 } 7762 7763 /* 7764 * Get page frame. It's ok if the page is 7765 * not available when we're unlocking, as this 7766 * may simply mean that a page we locked got 7767 * truncated out of existence after we locked it. 7768 * 7769 * Invoke VOP_GETPAGE() to obtain the page struct 7770 * since we may need to read it from disk if its 7771 * been paged out. 7772 */ 7773 if (op != MC_LOCK) 7774 pp = page_lookup(vp, off, SE_SHARED); 7775 else { 7776 page_t *pl[1 + 1]; 7777 int error; 7778 7779 ASSERT(vp != NULL); 7780 7781 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7782 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7783 S_OTHER, svd->cred, NULL); 7784 7785 if (error && ap != NULL) { 7786 anon_array_exit(&cookie); 7787 ANON_LOCK_EXIT(&->a_rwlock); 7788 } 7789 7790 /* 7791 * If the error is EDEADLK then we must bounce 7792 * up and drop all vm subsystem locks and then 7793 * retry the operation later 7794 * This behavior is a temporary measure because 7795 * ufs/sds logging is badly designed and will 7796 * deadlock if we don't allow this bounce to 7797 * happen. The real solution is to re-design 7798 * the logging code to work properly. See bug 7799 * 4125102 for details of the problem. 7800 */ 7801 if (error == EDEADLK) { 7802 err = error; 7803 goto out; 7804 } 7805 /* 7806 * Quit if we fail to fault in the page. Treat 7807 * the failure as an error, unless the addr 7808 * is mapped beyond the end of a file. 7809 */ 7810 if (error && svd->vp) { 7811 va.va_mask = AT_SIZE; 7812 if (VOP_GETATTR(svd->vp, &va, 0, 7813 svd->cred, NULL) != 0) { 7814 err = EIO; 7815 goto out; 7816 } 7817 if (btopr(va.va_size) >= 7818 btopr(off + 1)) { 7819 err = EIO; 7820 goto out; 7821 } 7822 goto out; 7823 7824 } else if (error) { 7825 err = EIO; 7826 goto out; 7827 } 7828 pp = pl[0]; 7829 ASSERT(pp != NULL); 7830 } 7831 7832 /* 7833 * See Statement at the beginning of this routine. 7834 * 7835 * claim is always set if MAP_PRIVATE and PROT_WRITE 7836 * irrespective of following factors: 7837 * 7838 * (1) anon slots are populated or not 7839 * (2) cow is broken or not 7840 * (3) refcnt on ap is 1 or greater than 1 7841 * 7842 * See 4140683 for details 7843 */ 7844 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7845 (svd->type == MAP_PRIVATE)); 7846 7847 /* 7848 * Perform page-level operation appropriate to 7849 * operation. If locking, undo the SOFTLOCK 7850 * performed to bring the page into memory 7851 * after setting the lock. If unlocking, 7852 * and no page was found, account for the claim 7853 * separately. 
7854 */ 7855 if (op == MC_LOCK) { 7856 int ret = 1; /* Assume success */ 7857 7858 ASSERT(!VPP_ISPPLOCK(vpp)); 7859 7860 ret = page_pp_lock(pp, claim, 0); 7861 if (ap != NULL) { 7862 if (ap->an_pvp != NULL) { 7863 anon_swap_free(ap, pp); 7864 } 7865 anon_array_exit(&cookie); 7866 ANON_LOCK_EXIT(&->a_rwlock); 7867 } 7868 if (ret == 0) { 7869 /* locking page failed */ 7870 page_unlock(pp); 7871 err = EAGAIN; 7872 goto out; 7873 } 7874 VPP_SETPPLOCK(vpp); 7875 if (sp != NULL) { 7876 if (pp->p_lckcnt == 1) 7877 locked_bytes += PAGESIZE; 7878 } else 7879 locked_bytes += PAGESIZE; 7880 7881 if (lockmap != (ulong_t *)NULL) 7882 BT_SET(lockmap, pos); 7883 7884 page_unlock(pp); 7885 } else { 7886 ASSERT(VPP_ISPPLOCK(vpp)); 7887 if (pp != NULL) { 7888 /* sysV pages should be locked */ 7889 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7890 page_pp_unlock(pp, claim, 0); 7891 if (sp != NULL) { 7892 if (pp->p_lckcnt == 0) 7893 unlocked_bytes 7894 += PAGESIZE; 7895 } else 7896 unlocked_bytes += PAGESIZE; 7897 page_unlock(pp); 7898 } else { 7899 ASSERT(sp == NULL); 7900 unlocked_bytes += PAGESIZE; 7901 } 7902 VPP_CLRPPLOCK(vpp); 7903 } 7904 } 7905 } 7906 out: 7907 if (op == MC_LOCK) { 7908 /* Credit back bytes that did not get locked */ 7909 if ((unlocked_bytes - locked_bytes) > 0) { 7910 if (proj == NULL) 7911 mutex_enter(&p->p_lock); 7912 rctl_decr_locked_mem(p, proj, 7913 (unlocked_bytes - locked_bytes), chargeproc); 7914 if (proj == NULL) 7915 mutex_exit(&p->p_lock); 7916 } 7917 7918 } else { 7919 /* Account bytes that were unlocked */ 7920 if (unlocked_bytes > 0) { 7921 if (proj == NULL) 7922 mutex_enter(&p->p_lock); 7923 rctl_decr_locked_mem(p, proj, unlocked_bytes, 7924 chargeproc); 7925 if (proj == NULL) 7926 mutex_exit(&p->p_lock); 7927 } 7928 } 7929 if (sp != NULL) 7930 mutex_exit(&sp->shm_mlock); 7931 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7932 7933 return (err); 7934 } 7935 7936 /* 7937 * Set advice from user for specified pages 7938 * There are 5 types of advice: 7939 * MADV_NORMAL - Normal (default) behavior (whatever that is) 7940 * MADV_RANDOM - Random page references 7941 * do not allow readahead or 'klustering' 7942 * MADV_SEQUENTIAL - Sequential page references 7943 * Pages previous to the one currently being 7944 * accessed (determined by fault) are 'not needed' 7945 * and are freed immediately 7946 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 7947 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 7948 * MADV_FREE - Contents can be discarded 7949 * MADV_ACCESS_DEFAULT- Default access 7950 * MADV_ACCESS_LWP - Next LWP will access heavily 7951 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 7952 */ 7953 static int 7954 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 7955 { 7956 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7957 size_t page; 7958 int err = 0; 7959 int already_set; 7960 struct anon_map *amp; 7961 ulong_t anon_index; 7962 struct seg *next; 7963 lgrp_mem_policy_t policy; 7964 struct seg *prev; 7965 struct vnode *vp; 7966 7967 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7968 7969 /* 7970 * In case of MADV_FREE, we won't be modifying any segment private 7971 * data structures; so, we only need to grab READER's lock 7972 */ 7973 if (behav != MADV_FREE) { 7974 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7975 if (svd->tr_state != SEGVN_TR_OFF) { 7976 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7977 return (0); 7978 } 7979 } else { 7980 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, 
RW_READER); 7981 } 7982 7983 /* 7984 * Large pages are assumed to be only turned on when accesses to the 7985 * segment's address range have spatial and temporal locality. That 7986 * justifies ignoring MADV_SEQUENTIAL for large page segments. 7987 * Also, ignore advice affecting lgroup memory allocation 7988 * if don't need to do lgroup optimizations on this system 7989 */ 7990 7991 if ((behav == MADV_SEQUENTIAL && 7992 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 7993 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 7994 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 7995 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7996 return (0); 7997 } 7998 7999 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 8000 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 8001 /* 8002 * Since we are going to unload hat mappings 8003 * we first have to flush the cache. Otherwise 8004 * this might lead to system panic if another 8005 * thread is doing physio on the range whose 8006 * mappings are unloaded by madvise(3C). 8007 */ 8008 if (svd->softlockcnt > 0) { 8009 /* 8010 * If this is shared segment non 0 softlockcnt 8011 * means locked pages are still in use. 8012 */ 8013 if (svd->type == MAP_SHARED) { 8014 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8015 return (EAGAIN); 8016 } 8017 /* 8018 * Since we do have the segvn writers lock 8019 * nobody can fill the cache with entries 8020 * belonging to this seg during the purge. 8021 * The flush either succeeds or we still 8022 * have pending I/Os. In the later case, 8023 * madvise(3C) fails. 8024 */ 8025 segvn_purge(seg); 8026 if (svd->softlockcnt > 0) { 8027 /* 8028 * Since madvise(3C) is advisory and 8029 * it's not part of UNIX98, madvise(3C) 8030 * failure here doesn't cause any hardship. 8031 * Note that we don't block in "as" layer. 8032 */ 8033 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8034 return (EAGAIN); 8035 } 8036 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 8037 svd->amp->a_softlockcnt > 0) { 8038 /* 8039 * Try to purge this amp's entries from pcache. It 8040 * will succeed only if other segments that share the 8041 * amp have no outstanding softlock's. 8042 */ 8043 segvn_purge(seg); 8044 } 8045 } 8046 8047 amp = svd->amp; 8048 vp = svd->vp; 8049 if (behav == MADV_FREE) { 8050 /* 8051 * MADV_FREE is not supported for segments with 8052 * underlying object; if anonmap is NULL, anon slots 8053 * are not yet populated and there is nothing for 8054 * us to do. As MADV_FREE is advisory, we don't 8055 * return error in either case. 8056 */ 8057 if (vp != NULL || amp == NULL) { 8058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8059 return (0); 8060 } 8061 8062 segvn_purge(seg); 8063 8064 page = seg_page(seg, addr); 8065 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 8066 anon_disclaim(amp, svd->anon_index + page, len); 8067 ANON_LOCK_EXIT(&->a_rwlock); 8068 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8069 return (0); 8070 } 8071 8072 /* 8073 * If advice is to be applied to entire segment, 8074 * use advice field in seg_data structure 8075 * otherwise use appropriate vpage entry. 
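 *
 * (Illustrative example, assuming a segment created by a single
 * mmap(2) call at "base" with length "sz":
 *
 *	madvise(base, sz, MADV_RANDOM);
 *		-- covers the whole segment: recorded in svd->advice below
 *	madvise(base + PAGESIZE, PAGESIZE, MADV_RANDOM);
 *		-- covers a sub-range only: segvn_vpage() is called and the
 *		   advice is recorded per page in svd->vpage[]
 * )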
8076 */ 8077 if ((addr == seg->s_base) && (len == seg->s_size)) { 8078 switch (behav) { 8079 case MADV_ACCESS_LWP: 8080 case MADV_ACCESS_MANY: 8081 case MADV_ACCESS_DEFAULT: 8082 /* 8083 * Set memory allocation policy for this segment 8084 */ 8085 policy = lgrp_madv_to_policy(behav, len, svd->type); 8086 if (svd->type == MAP_SHARED) 8087 already_set = lgrp_shm_policy_set(policy, amp, 8088 svd->anon_index, vp, svd->offset, len); 8089 else { 8090 /* 8091 * For private memory, need writers lock on 8092 * address space because the segment may be 8093 * split or concatenated when changing policy 8094 */ 8095 if (AS_READ_HELD(seg->s_as, 8096 &seg->s_as->a_lock)) { 8097 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8098 return (IE_RETRY); 8099 } 8100 8101 already_set = lgrp_privm_policy_set(policy, 8102 &svd->policy_info, len); 8103 } 8104 8105 /* 8106 * If policy set already and it shouldn't be reapplied, 8107 * don't do anything. 8108 */ 8109 if (already_set && 8110 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8111 break; 8112 8113 /* 8114 * Mark any existing pages in given range for 8115 * migration 8116 */ 8117 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8118 vp, svd->offset, 1); 8119 8120 /* 8121 * If same policy set already or this is a shared 8122 * memory segment, don't need to try to concatenate 8123 * segment with adjacent ones. 8124 */ 8125 if (already_set || svd->type == MAP_SHARED) 8126 break; 8127 8128 /* 8129 * Try to concatenate this segment with previous 8130 * one and next one, since we changed policy for 8131 * this one and it may be compatible with adjacent 8132 * ones now. 8133 */ 8134 prev = AS_SEGPREV(seg->s_as, seg); 8135 next = AS_SEGNEXT(seg->s_as, seg); 8136 8137 if (next && next->s_ops == &segvn_ops && 8138 addr + len == next->s_base) 8139 (void) segvn_concat(seg, next, 1); 8140 8141 if (prev && prev->s_ops == &segvn_ops && 8142 addr == prev->s_base + prev->s_size) { 8143 /* 8144 * Drop lock for private data of current 8145 * segment before concatenating (deleting) it 8146 * and return IE_REATTACH to tell as_ctl() that 8147 * current segment has changed 8148 */ 8149 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8150 if (!segvn_concat(prev, seg, 1)) 8151 err = IE_REATTACH; 8152 8153 return (err); 8154 } 8155 break; 8156 8157 case MADV_SEQUENTIAL: 8158 /* 8159 * unloading mapping guarantees 8160 * detection in segvn_fault 8161 */ 8162 ASSERT(seg->s_szc == 0); 8163 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8164 hat_unload(seg->s_as->a_hat, addr, len, 8165 HAT_UNLOAD); 8166 /* FALLTHROUGH */ 8167 case MADV_NORMAL: 8168 case MADV_RANDOM: 8169 svd->advice = (uchar_t)behav; 8170 svd->pageadvice = 0; 8171 break; 8172 case MADV_WILLNEED: /* handled in memcntl */ 8173 case MADV_DONTNEED: /* handled in memcntl */ 8174 case MADV_FREE: /* handled above */ 8175 break; 8176 default: 8177 err = EINVAL; 8178 } 8179 } else { 8180 caddr_t eaddr; 8181 struct seg *new_seg; 8182 struct segvn_data *new_svd; 8183 u_offset_t off; 8184 caddr_t oldeaddr; 8185 8186 page = seg_page(seg, addr); 8187 8188 segvn_vpage(seg); 8189 8190 switch (behav) { 8191 struct vpage *bvpp, *evpp; 8192 8193 case MADV_ACCESS_LWP: 8194 case MADV_ACCESS_MANY: 8195 case MADV_ACCESS_DEFAULT: 8196 /* 8197 * Set memory allocation policy for portion of this 8198 * segment 8199 */ 8200 8201 /* 8202 * Align address and length of advice to page 8203 * boundaries for large pages 8204 */ 8205 if (seg->s_szc != 0) { 8206 size_t pgsz; 8207 8208 pgsz = page_get_pagesize(seg->s_szc); 8209 addr = (caddr_t)P2ALIGN((uintptr_t)addr, 
pgsz); 8210 len = P2ROUNDUP(len, pgsz); 8211 } 8212 8213 /* 8214 * Check to see whether policy is set already 8215 */ 8216 policy = lgrp_madv_to_policy(behav, len, svd->type); 8217 8218 anon_index = svd->anon_index + page; 8219 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8220 8221 if (svd->type == MAP_SHARED) 8222 already_set = lgrp_shm_policy_set(policy, amp, 8223 anon_index, vp, off, len); 8224 else 8225 already_set = 8226 (policy == svd->policy_info.mem_policy); 8227 8228 /* 8229 * If policy set already and it shouldn't be reapplied, 8230 * don't do anything. 8231 */ 8232 if (already_set && 8233 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8234 break; 8235 8236 /* 8237 * For private memory, need writers lock on 8238 * address space because the segment may be 8239 * split or concatenated when changing policy 8240 */ 8241 if (svd->type == MAP_PRIVATE && 8242 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8243 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8244 return (IE_RETRY); 8245 } 8246 8247 /* 8248 * Mark any existing pages in given range for 8249 * migration 8250 */ 8251 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8252 vp, svd->offset, 1); 8253 8254 /* 8255 * Don't need to try to split or concatenate 8256 * segments, since policy is same or this is a shared 8257 * memory segment 8258 */ 8259 if (already_set || svd->type == MAP_SHARED) 8260 break; 8261 8262 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8263 ASSERT(svd->amp == NULL); 8264 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8265 ASSERT(svd->softlockcnt == 0); 8266 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8267 HAT_REGION_TEXT); 8268 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8269 } 8270 8271 /* 8272 * Split off new segment if advice only applies to a 8273 * portion of existing segment starting in middle 8274 */ 8275 new_seg = NULL; 8276 eaddr = addr + len; 8277 oldeaddr = seg->s_base + seg->s_size; 8278 if (addr > seg->s_base) { 8279 /* 8280 * Must flush I/O page cache 8281 * before splitting segment 8282 */ 8283 if (svd->softlockcnt > 0) 8284 segvn_purge(seg); 8285 8286 /* 8287 * Split segment and return IE_REATTACH to tell 8288 * as_ctl() that current segment changed 8289 */ 8290 new_seg = segvn_split_seg(seg, addr); 8291 new_svd = (struct segvn_data *)new_seg->s_data; 8292 err = IE_REATTACH; 8293 8294 /* 8295 * If new segment ends where old one 8296 * did, try to concatenate the new 8297 * segment with next one. 8298 */ 8299 if (eaddr == oldeaddr) { 8300 /* 8301 * Set policy for new segment 8302 */ 8303 (void) lgrp_privm_policy_set(policy, 8304 &new_svd->policy_info, 8305 new_seg->s_size); 8306 8307 next = AS_SEGNEXT(new_seg->s_as, 8308 new_seg); 8309 8310 if (next && 8311 next->s_ops == &segvn_ops && 8312 eaddr == next->s_base) 8313 (void) segvn_concat(new_seg, 8314 next, 1); 8315 } 8316 } 8317 8318 /* 8319 * Split off end of existing segment if advice only 8320 * applies to a portion of segment ending before 8321 * end of the existing segment 8322 */ 8323 if (eaddr < oldeaddr) { 8324 /* 8325 * Must flush I/O page cache 8326 * before splitting segment 8327 */ 8328 if (svd->softlockcnt > 0) 8329 segvn_purge(seg); 8330 8331 /* 8332 * If beginning of old segment was already 8333 * split off, use new segment to split end off 8334 * from. 
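 *
 * (Sketch of the overall outcome, added for illustration: when the
 * advised range [addr, addr + len) lies strictly inside the original
 * [s_base, s_base + s_size), the two segvn_split_seg() calls leave
 * three segvn segments --
 *
 *	[s_base, addr)   [addr, addr + len)   [addr + len, old end)
 *
 * with the new allocation policy applied only to the middle piece and
 * IE_REATTACH returned so as_ctl() rescans the segment list.)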
8335 */ 8336 if (new_seg != NULL && new_seg != seg) { 8337 /* 8338 * Split segment 8339 */ 8340 (void) segvn_split_seg(new_seg, eaddr); 8341 8342 /* 8343 * Set policy for new segment 8344 */ 8345 (void) lgrp_privm_policy_set(policy, 8346 &new_svd->policy_info, 8347 new_seg->s_size); 8348 } else { 8349 /* 8350 * Split segment and return IE_REATTACH 8351 * to tell as_ctl() that current 8352 * segment changed 8353 */ 8354 (void) segvn_split_seg(seg, eaddr); 8355 err = IE_REATTACH; 8356 8357 (void) lgrp_privm_policy_set(policy, 8358 &svd->policy_info, seg->s_size); 8359 8360 /* 8361 * If new segment starts where old one 8362 * did, try to concatenate it with 8363 * previous segment. 8364 */ 8365 if (addr == seg->s_base) { 8366 prev = AS_SEGPREV(seg->s_as, 8367 seg); 8368 8369 /* 8370 * Drop lock for private data 8371 * of current segment before 8372 * concatenating (deleting) it 8373 */ 8374 if (prev && 8375 prev->s_ops == 8376 &segvn_ops && 8377 addr == prev->s_base + 8378 prev->s_size) { 8379 SEGVN_LOCK_EXIT( 8380 seg->s_as, 8381 &svd->lock); 8382 (void) segvn_concat( 8383 prev, seg, 1); 8384 return (err); 8385 } 8386 } 8387 } 8388 } 8389 break; 8390 case MADV_SEQUENTIAL: 8391 ASSERT(seg->s_szc == 0); 8392 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8393 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8394 /* FALLTHROUGH */ 8395 case MADV_NORMAL: 8396 case MADV_RANDOM: 8397 bvpp = &svd->vpage[page]; 8398 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8399 for (; bvpp < evpp; bvpp++) 8400 VPP_SETADVICE(bvpp, behav); 8401 svd->advice = MADV_NORMAL; 8402 break; 8403 case MADV_WILLNEED: /* handled in memcntl */ 8404 case MADV_DONTNEED: /* handled in memcntl */ 8405 case MADV_FREE: /* handled above */ 8406 break; 8407 default: 8408 err = EINVAL; 8409 } 8410 } 8411 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8412 return (err); 8413 } 8414 8415 /* 8416 * Create a vpage structure for this seg. 8417 */ 8418 static void 8419 segvn_vpage(struct seg *seg) 8420 { 8421 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8422 struct vpage *vp, *evp; 8423 8424 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8425 8426 /* 8427 * If no vpage structure exists, allocate one. Copy the protections 8428 * and the advice from the segment itself to the individual pages. 8429 */ 8430 if (svd->vpage == NULL) { 8431 svd->pageadvice = 1; 8432 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage), 8433 KM_SLEEP); 8434 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8435 for (vp = svd->vpage; vp < evp; vp++) { 8436 VPP_SETPROT(vp, svd->prot); 8437 VPP_SETADVICE(vp, svd->advice); 8438 } 8439 } 8440 } 8441 8442 /* 8443 * Dump the pages belonging to this segvn segment. 
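 *
 * (Illustrative note -- apparently reached from the crash dump path:
 * every resident page's pfn is handed to dump_addpage() for inclusion
 * in the dump, and dump_timeleft is reset each iteration so the dump
 * progress watchdog does not fire.  Per page the identity is resolved
 * much like elsewhere in this file:
 *
 *	anon slot present -> swap_xlate_nopanic(ap, &vp, &off)
 *	otherwise         -> vp = svd->vp, off = svd->offset + page offset
 * )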
8444 */ 8445 static void 8446 segvn_dump(struct seg *seg) 8447 { 8448 struct segvn_data *svd; 8449 page_t *pp; 8450 struct anon_map *amp; 8451 ulong_t anon_index; 8452 struct vnode *vp; 8453 u_offset_t off, offset; 8454 pfn_t pfn; 8455 pgcnt_t page, npages; 8456 caddr_t addr; 8457 8458 npages = seg_pages(seg); 8459 svd = (struct segvn_data *)seg->s_data; 8460 vp = svd->vp; 8461 off = offset = svd->offset; 8462 addr = seg->s_base; 8463 8464 if ((amp = svd->amp) != NULL) { 8465 anon_index = svd->anon_index; 8466 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 8467 } 8468 8469 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8470 struct anon *ap; 8471 int we_own_it = 0; 8472 8473 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8474 swap_xlate_nopanic(ap, &vp, &off); 8475 } else { 8476 vp = svd->vp; 8477 off = offset; 8478 } 8479 8480 /* 8481 * If pp == NULL, the page either does not exist 8482 * or is exclusively locked. So determine if it 8483 * exists before searching for it. 8484 */ 8485 8486 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8487 we_own_it = 1; 8488 else 8489 pp = page_exists(vp, off); 8490 8491 if (pp) { 8492 pfn = page_pptonum(pp); 8493 dump_addpage(seg->s_as, addr, pfn); 8494 if (we_own_it) 8495 page_unlock(pp); 8496 } 8497 addr += PAGESIZE; 8498 dump_timeleft = dump_timeout; 8499 } 8500 8501 if (amp != NULL) 8502 ANON_LOCK_EXIT(&->a_rwlock); 8503 } 8504 8505 #ifdef DEBUG 8506 static uint32_t segvn_pglock_mtbf = 0; 8507 #endif 8508 8509 #define PCACHE_SHWLIST ((page_t *)-2) 8510 #define NOPCACHE_SHWLIST ((page_t *)-1) 8511 8512 /* 8513 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8514 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8515 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8516 * the same parts of the segment. Currently shadow list creation is only 8517 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are 8518 * tagged with segment pointer, starting virtual address and length. This 8519 * approach for MAP_SHARED segments may add many pcache entries for the same 8520 * set of pages and lead to long hash chains that decrease pcache lookup 8521 * performance. To avoid this issue for shared segments shared anon map and 8522 * starting anon index are used for pcache entry tagging. This allows all 8523 * segments to share pcache entries for the same anon range and reduces pcache 8524 * chain's length as well as memory overhead from duplicate shadow lists and 8525 * pcache entries. 8526 * 8527 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8528 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8529 * part of softlockcnt accounting is done differently for private and shared 8530 * segments. In private segment case softlock is only incremented when a new 8531 * shadow list is created but not when an existing one is found via 8532 * seg_plookup(). pcache entries have reference count incremented/decremented 8533 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8534 * reference count can be purged (and purging is needed before segment can be 8535 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8536 * decrement softlockcnt. 
Since in private segment case each of its pcache 8537 * entries only belongs to this segment we can expect that when 8538 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8539 * segment purge will succeed and softlockcnt will drop to 0. In shared 8540 * segment case reference count in pcache entry counts active locks from many 8541 * different segments so we can't expect segment purging to succeed even when 8542 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8543 * segment. To be able to determine when there're no pending pagelocks in 8544 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8545 * but instead softlockcnt is incremented and decremented for every 8546 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8547 * list was created or an existing one was found. When softlockcnt drops to 0 8548 * this segment no longer has any claims for pcached shadow lists and the 8549 * segment can be freed even if there're still active pcache entries 8550 * shared by this segment anon map. Shared segment pcache entries belong to 8551 * anon map and are typically removed when anon map is freed after all 8552 * processes destroy the segments that use this anon map. 8553 */ 8554 static int 8555 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8556 enum lock_type type, enum seg_rw rw) 8557 { 8558 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8559 size_t np; 8560 pgcnt_t adjustpages; 8561 pgcnt_t npages; 8562 ulong_t anon_index; 8563 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE; 8564 uint_t error; 8565 struct anon_map *amp; 8566 pgcnt_t anpgcnt; 8567 struct page **pplist, **pl, *pp; 8568 caddr_t a; 8569 size_t page; 8570 caddr_t lpgaddr, lpgeaddr; 8571 anon_sync_obj_t cookie; 8572 int anlock; 8573 struct anon_map *pamp; 8574 caddr_t paddr; 8575 seg_preclaim_cbfunc_t preclaim_callback; 8576 size_t pgsz; 8577 int use_pcache; 8578 size_t wlen; 8579 uint_t pflags = 0; 8580 int sftlck_sbase = 0; 8581 int sftlck_send = 0; 8582 8583 #ifdef DEBUG 8584 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8585 hrtime_t ts = gethrtime(); 8586 if ((ts % segvn_pglock_mtbf) == 0) { 8587 return (ENOTSUP); 8588 } 8589 if ((ts % segvn_pglock_mtbf) == 1) { 8590 return (EFAULT); 8591 } 8592 } 8593 #endif 8594 8595 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8596 "segvn_pagelock: start seg %p addr %p", seg, addr); 8597 8598 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8599 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8600 8601 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8602 8603 /* 8604 * for now we only support pagelock to anon memory. We would have to 8605 * check protections for vnode objects and call into the vnode driver. 8606 * That's too much for a fast path. Let the fault entry point handle 8607 * it. 
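 *
 * (Typical caller chain, added for illustration:
 *
 *	physio()/aphysio() and similar direct I/O paths
 *	    -> as_pagelock(as, &pplist, addr, len, rw)
 *	        -> SEGOP_PAGELOCK(seg, ...), i.e. this routine, L_PAGELOCK
 *
 * with the matching as_pageunlock() arriving here as L_PAGEUNLOCK once
 * the I/O completes.)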
8608 */ 8609 if (svd->vp != NULL) { 8610 if (type == L_PAGELOCK) { 8611 error = ENOTSUP; 8612 goto out; 8613 } 8614 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8615 } 8616 if ((amp = svd->amp) == NULL) { 8617 if (type == L_PAGELOCK) { 8618 error = EFAULT; 8619 goto out; 8620 } 8621 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8622 } 8623 if (rw != S_READ && rw != S_WRITE) { 8624 if (type == L_PAGELOCK) { 8625 error = ENOTSUP; 8626 goto out; 8627 } 8628 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8629 } 8630 8631 if (seg->s_szc != 0) { 8632 /* 8633 * We are adjusting the pagelock region to the large page size 8634 * boundary because the unlocked part of a large page cannot 8635 * be freed anyway unless all constituent pages of a large 8636 * page are locked. Bigger regions reduce pcache chain length 8637 * and improve lookup performance. The tradeoff is that the 8638 * very first segvn_pagelock() call for a given page is more 8639 * expensive if only 1 page_t is needed for IO. This is only 8640 * an issue if pcache entry doesn't get reused by several 8641 * subsequent calls. We optimize here for the case when pcache 8642 * is heavily used by repeated IOs to the same address range. 8643 * 8644 * Note segment's page size cannot change while we are holding 8645 * as lock. And then it cannot change while softlockcnt is 8646 * not 0. This will allow us to correctly recalculate large 8647 * page size region for the matching pageunlock/reclaim call 8648 * since as_pageunlock() caller must always match 8649 * as_pagelock() call's addr and len. 8650 * 8651 * For pageunlock *ppp points to the pointer of page_t that 8652 * corresponds to the real unadjusted start address. Similar 8653 * for pagelock *ppp must point to the pointer of page_t that 8654 * corresponds to the real unadjusted start address. 8655 */ 8656 pgsz = page_get_pagesize(seg->s_szc); 8657 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8658 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8659 } else if (len < segvn_pglock_comb_thrshld) { 8660 lpgaddr = addr; 8661 lpgeaddr = addr + len; 8662 adjustpages = 0; 8663 pgsz = PAGESIZE; 8664 } else { 8665 /* 8666 * Align the address range of large enough requests to allow 8667 * combining of different shadow lists into 1 to reduce memory 8668 * overhead from potentially overlapping large shadow lists 8669 * (worst case is we have a 1MB IO into buffers with start 8670 * addresses separated by 4K). Alignment is only possible if 8671 * padded chunks have sufficient access permissions. Note 8672 * permissions won't change between L_PAGELOCK and 8673 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8674 * segvn_setprot() to wait until softlockcnt drops to 0. This 8675 * allows us to determine in L_PAGEUNLOCK the same range we 8676 * computed in L_PAGELOCK. 8677 * 8678 * If alignment is limited by segment ends set 8679 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8680 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8681 * per segment counters. In L_PAGEUNLOCK case decrease 8682 * softlockcnt_sbase/softlockcnt_send counters if 8683 * sftlck_sbase/sftlck_send flags are set. When 8684 * softlockcnt_sbase/softlockcnt_send are non 0 8685 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8686 * won't merge the segments. 
This restriction combined with 8687 * restriction on segment unmapping and splitting for segments 8688 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8689 * correctly determine the same range that was previously 8690 * locked by matching L_PAGELOCK. 8691 */ 8692 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8693 pgsz = PAGESIZE; 8694 if (svd->type == MAP_PRIVATE) { 8695 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8696 segvn_pglock_comb_balign); 8697 if (lpgaddr < seg->s_base) { 8698 lpgaddr = seg->s_base; 8699 sftlck_sbase = 1; 8700 } 8701 } else { 8702 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8703 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8704 if (aaix < svd->anon_index) { 8705 lpgaddr = seg->s_base; 8706 sftlck_sbase = 1; 8707 } else { 8708 lpgaddr = addr - ptob(aix - aaix); 8709 ASSERT(lpgaddr >= seg->s_base); 8710 } 8711 } 8712 if (svd->pageprot && lpgaddr != addr) { 8713 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8714 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8715 while (vp < evp) { 8716 if ((VPP_PROT(vp) & protchk) == 0) { 8717 break; 8718 } 8719 vp++; 8720 } 8721 if (vp < evp) { 8722 lpgaddr = addr; 8723 pflags = 0; 8724 } 8725 } 8726 lpgeaddr = addr + len; 8727 if (pflags) { 8728 if (svd->type == MAP_PRIVATE) { 8729 lpgeaddr = (caddr_t)P2ROUNDUP( 8730 (uintptr_t)lpgeaddr, 8731 segvn_pglock_comb_balign); 8732 } else { 8733 ulong_t aix = svd->anon_index + 8734 seg_page(seg, lpgeaddr); 8735 ulong_t aaix = P2ROUNDUP(aix, 8736 segvn_pglock_comb_palign); 8737 if (aaix < aix) { 8738 lpgeaddr = 0; 8739 } else { 8740 lpgeaddr += ptob(aaix - aix); 8741 } 8742 } 8743 if (lpgeaddr == 0 || 8744 lpgeaddr > seg->s_base + seg->s_size) { 8745 lpgeaddr = seg->s_base + seg->s_size; 8746 sftlck_send = 1; 8747 } 8748 } 8749 if (svd->pageprot && lpgeaddr != addr + len) { 8750 struct vpage *vp; 8751 struct vpage *evp; 8752 8753 vp = &svd->vpage[seg_page(seg, addr + len)]; 8754 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8755 8756 while (vp < evp) { 8757 if ((VPP_PROT(vp) & protchk) == 0) { 8758 break; 8759 } 8760 vp++; 8761 } 8762 if (vp < evp) { 8763 lpgeaddr = addr + len; 8764 } 8765 } 8766 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8767 } 8768 8769 /* 8770 * For MAP_SHARED segments we create pcache entries tagged by amp and 8771 * anon index so that we can share pcache entries with other segments 8772 * that map this amp. For private segments pcache entries are tagged 8773 * with segment and virtual address. 8774 */ 8775 if (svd->type == MAP_SHARED) { 8776 pamp = amp; 8777 paddr = (caddr_t)((lpgaddr - seg->s_base) + 8778 ptob(svd->anon_index)); 8779 preclaim_callback = shamp_reclaim; 8780 } else { 8781 pamp = NULL; 8782 paddr = lpgaddr; 8783 preclaim_callback = segvn_reclaim; 8784 } 8785 8786 if (type == L_PAGEUNLOCK) { 8787 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8788 8789 /* 8790 * update hat ref bits for /proc. We need to make sure 8791 * that threads tracing the ref and mod bits of the 8792 * address space get the right data. 8793 * Note: page ref and mod bits are updated at reclaim time 8794 */ 8795 if (seg->s_as->a_vbits) { 8796 for (a = addr; a < addr + len; a += PAGESIZE) { 8797 if (rw == S_WRITE) { 8798 hat_setstat(seg->s_as, a, 8799 PAGESIZE, P_REF | P_MOD); 8800 } else { 8801 hat_setstat(seg->s_as, a, 8802 PAGESIZE, P_REF); 8803 } 8804 } 8805 } 8806 8807 /* 8808 * Check the shadow list entry after the last page used in 8809 * this IO request. 
If it's NOPCACHE_SHWLIST the shadow list 8810 * was not inserted into pcache and is not large page 8811 * adjusted. In this case call reclaim callback directly and 8812 * don't adjust the shadow list start and size for large 8813 * pages. 8814 */ 8815 npages = btop(len); 8816 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 8817 void *ptag; 8818 if (pamp != NULL) { 8819 ASSERT(svd->type == MAP_SHARED); 8820 ptag = (void *)pamp; 8821 paddr = (caddr_t)((addr - seg->s_base) + 8822 ptob(svd->anon_index)); 8823 } else { 8824 ptag = (void *)seg; 8825 paddr = addr; 8826 } 8827 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 8828 } else { 8829 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 8830 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 8831 len = lpgeaddr - lpgaddr; 8832 npages = btop(len); 8833 seg_pinactive(seg, pamp, paddr, len, 8834 *ppp - adjustpages, rw, pflags, preclaim_callback); 8835 } 8836 8837 if (pamp != NULL) { 8838 ASSERT(svd->type == MAP_SHARED); 8839 ASSERT(svd->softlockcnt >= npages); 8840 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 8841 } 8842 8843 if (sftlck_sbase) { 8844 ASSERT(svd->softlockcnt_sbase > 0); 8845 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1); 8846 } 8847 if (sftlck_send) { 8848 ASSERT(svd->softlockcnt_send > 0); 8849 atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1); 8850 } 8851 8852 /* 8853 * If someone is blocked while unmapping, we purge 8854 * segment page cache and thus reclaim pplist synchronously 8855 * without waiting for seg_pasync_thread. This speeds up 8856 * unmapping in cases where munmap(2) is called, while 8857 * raw async i/o is still in progress or where a thread 8858 * exits on data fault in a multithreaded application. 8859 */ 8860 if (AS_ISUNMAPWAIT(seg->s_as)) { 8861 if (svd->softlockcnt == 0) { 8862 mutex_enter(&seg->s_as->a_contents); 8863 if (AS_ISUNMAPWAIT(seg->s_as)) { 8864 AS_CLRUNMAPWAIT(seg->s_as); 8865 cv_broadcast(&seg->s_as->a_cv); 8866 } 8867 mutex_exit(&seg->s_as->a_contents); 8868 } else if (pamp == NULL) { 8869 /* 8870 * softlockcnt is not 0 and this is a 8871 * MAP_PRIVATE segment. Try to purge its 8872 * pcache entries to reduce softlockcnt. 8873 * If it drops to 0 segvn_reclaim() 8874 * will wake up a thread waiting on 8875 * unmapwait flag. 8876 * 8877 * We don't purge MAP_SHARED segments with non 8878 * 0 softlockcnt since IO is still in progress 8879 * for such segments. 8880 */ 8881 ASSERT(svd->type == MAP_PRIVATE); 8882 segvn_purge(seg); 8883 } 8884 } 8885 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8886 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8887 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8888 return (0); 8889 } 8890 8891 /* The L_PAGELOCK case ... */ 8892 8893 VM_STAT_ADD(segvnvmstats.pagelock[1]); 8894 8895 /* 8896 * For MAP_SHARED segments we have to check protections before 8897 * seg_plookup() since pcache entries may be shared by many segments 8898 * with potentially different page protections. 
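 *
 * (Concrete example of why, added for illustration: processes A and B
 * attach the same amp, A with PROT_READ|PROT_WRITE and B with
 * PROT_READ only.  A's S_WRITE pagelock can insert a shadow list
 * tagged by <amp, anon index>; a later S_WRITE request from B would
 * find that same entry in pcache, so B's own protections must be
 * checked and the request failed with EACCES here, before the lookup.)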
8899 */ 8900 if (pamp != NULL) { 8901 ASSERT(svd->type == MAP_SHARED); 8902 if (svd->pageprot == 0) { 8903 if ((svd->prot & protchk) == 0) { 8904 error = EACCES; 8905 goto out; 8906 } 8907 } else { 8908 /* 8909 * check page protections 8910 */ 8911 caddr_t ea; 8912 8913 if (seg->s_szc) { 8914 a = lpgaddr; 8915 ea = lpgeaddr; 8916 } else { 8917 a = addr; 8918 ea = addr + len; 8919 } 8920 for (; a < ea; a += pgsz) { 8921 struct vpage *vp; 8922 8923 ASSERT(seg->s_szc == 0 || 8924 sameprot(seg, a, pgsz)); 8925 vp = &svd->vpage[seg_page(seg, a)]; 8926 if ((VPP_PROT(vp) & protchk) == 0) { 8927 error = EACCES; 8928 goto out; 8929 } 8930 } 8931 } 8932 } 8933 8934 /* 8935 * try to find pages in segment page cache 8936 */ 8937 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 8938 if (pplist != NULL) { 8939 if (pamp != NULL) { 8940 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 8941 ASSERT(svd->type == MAP_SHARED); 8942 atomic_add_long((ulong_t *)&svd->softlockcnt, 8943 npages); 8944 } 8945 if (sftlck_sbase) { 8946 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1); 8947 } 8948 if (sftlck_send) { 8949 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1); 8950 } 8951 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8952 *ppp = pplist + adjustpages; 8953 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8954 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8955 return (0); 8956 } 8957 8958 /* 8959 * For MAP_SHARED segments we already verified above that segment 8960 * protections allow this pagelock operation. 8961 */ 8962 if (pamp == NULL) { 8963 ASSERT(svd->type == MAP_PRIVATE); 8964 if (svd->pageprot == 0) { 8965 if ((svd->prot & protchk) == 0) { 8966 error = EACCES; 8967 goto out; 8968 } 8969 if (svd->prot & PROT_WRITE) { 8970 wlen = lpgeaddr - lpgaddr; 8971 } else { 8972 wlen = 0; 8973 ASSERT(rw == S_READ); 8974 } 8975 } else { 8976 int wcont = 1; 8977 /* 8978 * check page protections 8979 */ 8980 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 8981 struct vpage *vp; 8982 8983 ASSERT(seg->s_szc == 0 || 8984 sameprot(seg, a, pgsz)); 8985 vp = &svd->vpage[seg_page(seg, a)]; 8986 if ((VPP_PROT(vp) & protchk) == 0) { 8987 error = EACCES; 8988 goto out; 8989 } 8990 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 8991 wlen += pgsz; 8992 } else { 8993 wcont = 0; 8994 ASSERT(rw == S_READ); 8995 } 8996 } 8997 } 8998 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 8999 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9000 } 9001 9002 /* 9003 * Only build large page adjusted shadow list if we expect to insert 9004 * it into pcache. For large enough pages it's a big overhead to 9005 * create a shadow list of the entire large page. But this overhead 9006 * should be amortized over repeated pcache hits on subsequent reuse 9007 * of this shadow list (IO into any range within this shadow list will 9008 * find it in pcache since we large page align the request for pcache 9009 * lookups). pcache performance is improved with bigger shadow lists 9010 * as it reduces the time to pcache the entire big segment and reduces 9011 * pcache chain length. 9012 */ 9013 if (seg_pinsert_check(seg, pamp, paddr, 9014 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9015 addr = lpgaddr; 9016 len = lpgeaddr - lpgaddr; 9017 use_pcache = 1; 9018 } else { 9019 use_pcache = 0; 9020 /* 9021 * Since this entry will not be inserted into the pcache, we 9022 * will not do any adjustments to the starting address or 9023 * size of the memory to be locked. 
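 *
 * (Layout of the shadow list built below, shown for illustration:
 *
 *	pl[0] .. pl[npages - 1]   page_t pointers for the locked range
 *	pl[npages]                sentinel -- PCACHE_SHWLIST when the list
 *	                          is large-page adjusted and may be put in
 *	                          pcache, NOPCACHE_SHWLIST otherwise
 *
 * and *ppp handed back to the caller is pl + adjustpages, i.e. the
 * entry for the caller's unadjusted start address.)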
9024 */ 9025 adjustpages = 0; 9026 } 9027 npages = btop(len); 9028 9029 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9030 pl = pplist; 9031 *ppp = pplist + adjustpages; 9032 /* 9033 * If use_pcache is 0 this shadow list is not large page adjusted. 9034 * Record this info in the last entry of shadow array so that 9035 * L_PAGEUNLOCK can determine if it should large page adjust the 9036 * address range to find the real range that was locked. 9037 */ 9038 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9039 9040 page = seg_page(seg, addr); 9041 anon_index = svd->anon_index + page; 9042 9043 anlock = 0; 9044 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 9045 ASSERT(amp->a_szc >= seg->s_szc); 9046 anpgcnt = page_get_pagecnt(amp->a_szc); 9047 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9048 struct anon *ap; 9049 struct vnode *vp; 9050 u_offset_t off; 9051 9052 /* 9053 * Lock and unlock anon array only once per large page. 9054 * anon_array_enter() locks the root anon slot according to 9055 * a_szc which can't change while anon map is locked. We lock 9056 * anon the first time through this loop and each time we 9057 * reach anon index that corresponds to a root of a large 9058 * page. 9059 */ 9060 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9061 ASSERT(anlock == 0); 9062 anon_array_enter(amp, anon_index, &cookie); 9063 anlock = 1; 9064 } 9065 ap = anon_get_ptr(amp->ahp, anon_index); 9066 9067 /* 9068 * We must never use seg_pcache for COW pages 9069 * because we might end up with original page still 9070 * lying in seg_pcache even after private page is 9071 * created. This leads to data corruption as 9072 * aio_write refers to the page still in cache 9073 * while all other accesses refer to the private 9074 * page. 9075 */ 9076 if (ap == NULL || ap->an_refcnt != 1) { 9077 struct vpage *vpage; 9078 9079 if (seg->s_szc) { 9080 error = EFAULT; 9081 break; 9082 } 9083 if (svd->vpage != NULL) { 9084 vpage = &svd->vpage[seg_page(seg, a)]; 9085 } else { 9086 vpage = NULL; 9087 } 9088 ASSERT(anlock); 9089 anon_array_exit(&cookie); 9090 anlock = 0; 9091 pp = NULL; 9092 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9093 vpage, &pp, 0, F_INVAL, rw, 1); 9094 if (error) { 9095 error = fc_decode(error); 9096 break; 9097 } 9098 anon_array_enter(amp, anon_index, &cookie); 9099 anlock = 1; 9100 ap = anon_get_ptr(amp->ahp, anon_index); 9101 if (ap == NULL || ap->an_refcnt != 1) { 9102 error = EFAULT; 9103 break; 9104 } 9105 } 9106 swap_xlate(ap, &vp, &off); 9107 pp = page_lookup_nowait(vp, off, SE_SHARED); 9108 if (pp == NULL) { 9109 error = EFAULT; 9110 break; 9111 } 9112 if (ap->an_pvp != NULL) { 9113 anon_swap_free(ap, pp); 9114 } 9115 /* 9116 * Unlock anon if this is the last slot in a large page. 
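 *
 * (For example, with amp->a_szc covering 8 base pages the loop enters
 * and exits the anon array lock once per large page rather than once
 * per PAGESIZE page:
 *
 *	anon_index:   0 1 2 3 4 5 6 7 | 8 9 ...
 *	              ^enter     exit^  ^enter ...
 *
 * except that the very first enter happens at "addr" even when it is
 * not large-page aligned.)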
9117 */ 9118 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9119 ASSERT(anlock); 9120 anon_array_exit(&cookie); 9121 anlock = 0; 9122 } 9123 *pplist++ = pp; 9124 } 9125 if (anlock) { /* Ensure the lock is dropped */ 9126 anon_array_exit(&cookie); 9127 } 9128 ANON_LOCK_EXIT(&->a_rwlock); 9129 9130 if (a >= addr + len) { 9131 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9132 if (pamp != NULL) { 9133 ASSERT(svd->type == MAP_SHARED); 9134 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9135 npages); 9136 wlen = len; 9137 } 9138 if (sftlck_sbase) { 9139 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1); 9140 } 9141 if (sftlck_send) { 9142 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1); 9143 } 9144 if (use_pcache) { 9145 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9146 rw, pflags, preclaim_callback); 9147 } 9148 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9149 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9150 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9151 return (0); 9152 } 9153 9154 pplist = pl; 9155 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9156 while (np > (uint_t)0) { 9157 ASSERT(PAGE_LOCKED(*pplist)); 9158 page_unlock(*pplist); 9159 np--; 9160 pplist++; 9161 } 9162 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9163 out: 9164 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9165 *ppp = NULL; 9166 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9167 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9168 return (error); 9169 } 9170 9171 /* 9172 * purge any cached pages in the I/O page cache 9173 */ 9174 static void 9175 segvn_purge(struct seg *seg) 9176 { 9177 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9178 9179 /* 9180 * pcache is only used by pure anon segments. 9181 */ 9182 if (svd->amp == NULL || svd->vp != NULL) { 9183 return; 9184 } 9185 9186 /* 9187 * For MAP_SHARED segments non 0 segment's softlockcnt means 9188 * active IO is still in progress via this segment. So we only 9189 * purge MAP_SHARED segments when their softlockcnt is 0. 9190 */ 9191 if (svd->type == MAP_PRIVATE) { 9192 if (svd->softlockcnt) { 9193 seg_ppurge(seg, NULL, 0); 9194 } 9195 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9196 seg_ppurge(seg, svd->amp, 0); 9197 } 9198 } 9199 9200 /* 9201 * If async argument is not 0 we are called from pcache async thread and don't 9202 * hold AS lock. 9203 */ 9204 9205 /*ARGSUSED*/ 9206 static int 9207 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9208 enum seg_rw rw, int async) 9209 { 9210 struct seg *seg = (struct seg *)ptag; 9211 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9212 pgcnt_t np, npages; 9213 struct page **pl; 9214 9215 npages = np = btop(len); 9216 ASSERT(npages); 9217 9218 ASSERT(svd->vp == NULL && svd->amp != NULL); 9219 ASSERT(svd->softlockcnt >= npages); 9220 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9221 9222 pl = pplist; 9223 9224 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9225 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9226 9227 while (np > (uint_t)0) { 9228 if (rw == S_WRITE) { 9229 hat_setrefmod(*pplist); 9230 } else { 9231 hat_setref(*pplist); 9232 } 9233 page_unlock(*pplist); 9234 np--; 9235 pplist++; 9236 } 9237 9238 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9239 9240 /* 9241 * If we are pcache async thread we don't hold AS lock. This means if 9242 * softlockcnt drops to 0 after the decrement below address space may 9243 * get freed. 
We can't allow it since after softlock decrement to 0 we
9244	 * still need to access as structure for possible wakeup of unmap
9245	 * waiters. To prevent the disappearance of as we take this segment's
9246	 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9247	 * make sure this routine completes before segment is freed.
9248	 *
9249	 * The second complication we have to deal with in async case is a
9250	 * possibility of missed wake up of unmap wait thread. When we don't
9251	 * hold as lock here we may take a_contents lock before unmap wait
9252	 * thread that was first to see softlockcnt was still not 0. As a
9253	 * result we'll fail to wake up an unmap wait thread. To avoid this
9254	 * race we set nounmapwait flag in as structure if we drop softlockcnt
9255	 * to 0 when we were called by pcache async thread. unmapwait thread
9256	 * will not block if this flag is set.
9257	 */
9258	if (async) {
9259		mutex_enter(&svd->segfree_syncmtx);
9260	}
9261
9262	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9263		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9264			mutex_enter(&seg->s_as->a_contents);
9265			if (async) {
9266				AS_SETNOUNMAPWAIT(seg->s_as);
9267			}
9268			if (AS_ISUNMAPWAIT(seg->s_as)) {
9269				AS_CLRUNMAPWAIT(seg->s_as);
9270				cv_broadcast(&seg->s_as->a_cv);
9271			}
9272			mutex_exit(&seg->s_as->a_contents);
9273		}
9274	}
9275
9276	if (async) {
9277		mutex_exit(&svd->segfree_syncmtx);
9278	}
9279	return (0);
9280 }
9281
9282 /*ARGSUSED*/
9283 static int
9284 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9285	enum seg_rw rw, int async)
9286 {
9287	amp_t *amp = (amp_t *)ptag;
9288	pgcnt_t np, npages;
9289	struct page **pl;
9290
9291	npages = np = btop(len);
9292	ASSERT(npages);
9293	ASSERT(amp->a_softlockcnt >= npages);
9294
9295	pl = pplist;
9296
9297	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9298	ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9299
9300	while (np > (uint_t)0) {
9301		if (rw == S_WRITE) {
9302			hat_setrefmod(*pplist);
9303		} else {
9304			hat_setref(*pplist);
9305		}
9306		page_unlock(*pplist);
9307		np--;
9308		pplist++;
9309	}
9310
9311	kmem_free(pl, sizeof (page_t *) * (npages + 1));
9312
9313	/*
9314	 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9315	 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9316	 * and anonmap_purge() acquires a_purgemtx.
9317	 */
9318	mutex_enter(&amp->a_purgemtx);
9319	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9320	    amp->a_purgewait) {
9321		amp->a_purgewait = 0;
9322		cv_broadcast(&amp->a_purgecv);
9323	}
9324	mutex_exit(&amp->a_purgemtx);
9325	return (0);
9326 }
9327
9328 /*
9329  * get a memory ID for an addr in a given segment
9330  *
9331  * XXX only creates PAGESIZE pages if anon slots are not initialized.
9332  * At fault time they will be relocated into larger pages.
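 *
 * (Summary of the cases handled below, added for illustration:
 *
 *	MAP_PRIVATE                 val[0] = address space pointer
 *	                            val[1] = virtual address
 *	MAP_SHARED, vnode backed    val[0] = vnode pointer
 *	                            val[1] = offset within the file
 *	MAP_SHARED, anonymous       val[0] = anon slot pointer
 *	                            val[1] = offset within the page
 *
 * allocating a zero-filled anon page first if the slot is still empty.)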
9333 */ 9334 static int 9335 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9336 { 9337 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9338 struct anon *ap = NULL; 9339 ulong_t anon_index; 9340 struct anon_map *amp; 9341 anon_sync_obj_t cookie; 9342 9343 if (svd->type == MAP_PRIVATE) { 9344 memidp->val[0] = (uintptr_t)seg->s_as; 9345 memidp->val[1] = (uintptr_t)addr; 9346 return (0); 9347 } 9348 9349 if (svd->type == MAP_SHARED) { 9350 if (svd->vp) { 9351 memidp->val[0] = (uintptr_t)svd->vp; 9352 memidp->val[1] = (u_longlong_t)svd->offset + 9353 (uintptr_t)(addr - seg->s_base); 9354 return (0); 9355 } else { 9356 9357 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9358 if ((amp = svd->amp) != NULL) { 9359 anon_index = svd->anon_index + 9360 seg_page(seg, addr); 9361 } 9362 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9363 9364 ASSERT(amp != NULL); 9365 9366 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 9367 anon_array_enter(amp, anon_index, &cookie); 9368 ap = anon_get_ptr(amp->ahp, anon_index); 9369 if (ap == NULL) { 9370 page_t *pp; 9371 9372 pp = anon_zero(seg, addr, &ap, svd->cred); 9373 if (pp == NULL) { 9374 anon_array_exit(&cookie); 9375 ANON_LOCK_EXIT(&->a_rwlock); 9376 return (ENOMEM); 9377 } 9378 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9379 == NULL); 9380 (void) anon_set_ptr(amp->ahp, anon_index, 9381 ap, ANON_SLEEP); 9382 page_unlock(pp); 9383 } 9384 9385 anon_array_exit(&cookie); 9386 ANON_LOCK_EXIT(&->a_rwlock); 9387 9388 memidp->val[0] = (uintptr_t)ap; 9389 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9390 return (0); 9391 } 9392 } 9393 return (EINVAL); 9394 } 9395 9396 static int 9397 sameprot(struct seg *seg, caddr_t a, size_t len) 9398 { 9399 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9400 struct vpage *vpage; 9401 spgcnt_t pages = btop(len); 9402 uint_t prot; 9403 9404 if (svd->pageprot == 0) 9405 return (1); 9406 9407 ASSERT(svd->vpage != NULL); 9408 9409 vpage = &svd->vpage[seg_page(seg, a)]; 9410 prot = VPP_PROT(vpage); 9411 vpage++; 9412 pages--; 9413 while (pages-- > 0) { 9414 if (prot != VPP_PROT(vpage)) 9415 return (0); 9416 vpage++; 9417 } 9418 return (1); 9419 } 9420 9421 /* 9422 * Get memory allocation policy info for specified address in given segment 9423 */ 9424 static lgrp_mem_policy_info_t * 9425 segvn_getpolicy(struct seg *seg, caddr_t addr) 9426 { 9427 struct anon_map *amp; 9428 ulong_t anon_index; 9429 lgrp_mem_policy_info_t *policy_info; 9430 struct segvn_data *svn_data; 9431 u_offset_t vn_off; 9432 vnode_t *vp; 9433 9434 ASSERT(seg != NULL); 9435 9436 svn_data = (struct segvn_data *)seg->s_data; 9437 if (svn_data == NULL) 9438 return (NULL); 9439 9440 /* 9441 * Get policy info for private or shared memory 9442 */ 9443 if (svn_data->type != MAP_SHARED) { 9444 if (svn_data->tr_state != SEGVN_TR_ON) { 9445 policy_info = &svn_data->policy_info; 9446 } else { 9447 policy_info = &svn_data->tr_policy_info; 9448 ASSERT(policy_info->mem_policy == 9449 LGRP_MEM_POLICY_NEXT_SEG); 9450 } 9451 } else { 9452 amp = svn_data->amp; 9453 anon_index = svn_data->anon_index + seg_page(seg, addr); 9454 vp = svn_data->vp; 9455 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9456 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9457 } 9458 9459 return (policy_info); 9460 } 9461 9462 /*ARGSUSED*/ 9463 static int 9464 segvn_capable(struct seg *seg, segcapability_t capability) 9465 { 9466 return (0); 9467 } 9468 9469 /* 9470 * Bind text vnode segment to an amp. 
If we bind successfully mappings will be 9471 * established to per vnode mapping per lgroup amp pages instead of to vnode 9472 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9473 * may share the same text replication amp. If a suitable amp doesn't already 9474 * exist in svntr hash table create a new one. We may fail to bind to amp if 9475 * segment is not eligible for text replication. Code below first checks for 9476 * these conditions. If binding is successful segment tr_state is set to on 9477 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9478 * svd->amp remains as NULL. 9479 */ 9480 static void 9481 segvn_textrepl(struct seg *seg) 9482 { 9483 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9484 vnode_t *vp = svd->vp; 9485 u_offset_t off = svd->offset; 9486 size_t size = seg->s_size; 9487 u_offset_t eoff = off + size; 9488 uint_t szc = seg->s_szc; 9489 ulong_t hash = SVNTR_HASH_FUNC(vp); 9490 svntr_t *svntrp; 9491 struct vattr va; 9492 proc_t *p = seg->s_as->a_proc; 9493 lgrp_id_t lgrp_id; 9494 lgrp_id_t olid; 9495 int first; 9496 struct anon_map *amp; 9497 9498 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9499 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9500 ASSERT(p != NULL); 9501 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9502 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9503 ASSERT(svd->flags & MAP_TEXT); 9504 ASSERT(svd->type == MAP_PRIVATE); 9505 ASSERT(vp != NULL && svd->amp == NULL); 9506 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9507 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9508 ASSERT(seg->s_as != &kas); 9509 ASSERT(off < eoff); 9510 ASSERT(svntr_hashtab != NULL); 9511 9512 /* 9513 * If numa optimizations are no longer desired bail out. 9514 */ 9515 if (!lgrp_optimizations()) { 9516 svd->tr_state = SEGVN_TR_OFF; 9517 return; 9518 } 9519 9520 /* 9521 * Avoid creating anon maps with size bigger than the file size. 9522 * If VOP_GETATTR() call fails bail out. 9523 */ 9524 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9525 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9526 svd->tr_state = SEGVN_TR_OFF; 9527 SEGVN_TR_ADDSTAT(gaerr); 9528 return; 9529 } 9530 if (btopr(va.va_size) < btopr(eoff)) { 9531 svd->tr_state = SEGVN_TR_OFF; 9532 SEGVN_TR_ADDSTAT(overmap); 9533 return; 9534 } 9535 9536 /* 9537 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9538 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9539 * mapping that checks if trcache for this vnode needs to be 9540 * invalidated can't miss us. 9541 */ 9542 if (!(vp->v_flag & VVMEXEC)) { 9543 mutex_enter(&vp->v_lock); 9544 vp->v_flag |= VVMEXEC; 9545 mutex_exit(&vp->v_lock); 9546 } 9547 mutex_enter(&svntr_hashtab[hash].tr_lock); 9548 /* 9549 * Bail out if potentially MAP_SHARED writable mappings exist to this 9550 * vnode. We don't want to use old file contents from existing 9551 * replicas if this mapping was established after the original file 9552 * was changed. 
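 *
 * (For instance -- an added illustrative note -- if some other process
 * still has this file mapped MAP_SHARED with PROT_WRITE, say a tool
 * rewriting the executable in place, vn_is_mapped(vp, V_WRITE) is true
 * and this mapping simply stays a plain vnode mapping; no replica is
 * created and tr_state is turned off below.)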
9553 */ 9554 if (vn_is_mapped(vp, V_WRITE)) { 9555 mutex_exit(&svntr_hashtab[hash].tr_lock); 9556 svd->tr_state = SEGVN_TR_OFF; 9557 SEGVN_TR_ADDSTAT(wrcnt); 9558 return; 9559 } 9560 svntrp = svntr_hashtab[hash].tr_head; 9561 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9562 ASSERT(svntrp->tr_refcnt != 0); 9563 if (svntrp->tr_vp != vp) { 9564 continue; 9565 } 9566 9567 /* 9568 * Bail out if the file or its attributes were changed after 9569 * this replication entry was created since we need to use the 9570 * latest file contents. Note that mtime test alone is not 9571 * sufficient because a user can explicitly change mtime via 9572 * utimes(2) interfaces back to the old value after modifiying 9573 * the file contents. To detect this case we also have to test 9574 * ctime which among other things records the time of the last 9575 * mtime change by utimes(2). ctime is not changed when the file 9576 * is only read or executed so we expect that typically existing 9577 * replication amp's can be used most of the time. 9578 */ 9579 if (!svntrp->tr_valid || 9580 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9581 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9582 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9583 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9584 mutex_exit(&svntr_hashtab[hash].tr_lock); 9585 svd->tr_state = SEGVN_TR_OFF; 9586 SEGVN_TR_ADDSTAT(stale); 9587 return; 9588 } 9589 /* 9590 * if off, eoff and szc match current segment we found the 9591 * existing entry we can use. 9592 */ 9593 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9594 svntrp->tr_szc == szc) { 9595 break; 9596 } 9597 /* 9598 * Don't create different but overlapping in file offsets 9599 * entries to avoid replication of the same file pages more 9600 * than once per lgroup. 9601 */ 9602 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9603 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9604 mutex_exit(&svntr_hashtab[hash].tr_lock); 9605 svd->tr_state = SEGVN_TR_OFF; 9606 SEGVN_TR_ADDSTAT(overlap); 9607 return; 9608 } 9609 } 9610 /* 9611 * If we didn't find existing entry create a new one. 9612 */ 9613 if (svntrp == NULL) { 9614 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9615 if (svntrp == NULL) { 9616 mutex_exit(&svntr_hashtab[hash].tr_lock); 9617 svd->tr_state = SEGVN_TR_OFF; 9618 SEGVN_TR_ADDSTAT(nokmem); 9619 return; 9620 } 9621 #ifdef DEBUG 9622 { 9623 lgrp_id_t i; 9624 for (i = 0; i < NLGRPS_MAX; i++) { 9625 ASSERT(svntrp->tr_amp[i] == NULL); 9626 } 9627 } 9628 #endif /* DEBUG */ 9629 svntrp->tr_vp = vp; 9630 svntrp->tr_off = off; 9631 svntrp->tr_eoff = eoff; 9632 svntrp->tr_szc = szc; 9633 svntrp->tr_valid = 1; 9634 svntrp->tr_mtime = va.va_mtime; 9635 svntrp->tr_ctime = va.va_ctime; 9636 svntrp->tr_refcnt = 0; 9637 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9638 svntr_hashtab[hash].tr_head = svntrp; 9639 } 9640 first = 1; 9641 again: 9642 /* 9643 * We want to pick a replica with pages on main thread's (t_tid = 1, 9644 * aka T1) lgrp. Currently text replication is only optimized for 9645 * workloads that either have all threads of a process on the same 9646 * lgrp or execute their large text primarily on main thread. 9647 */ 9648 lgrp_id = p->p_t1_lgrpid; 9649 if (lgrp_id == LGRP_NONE) { 9650 /* 9651 * In case exec() prefaults text on non main thread use 9652 * current thread lgrpid. It will become main thread anyway 9653 * soon. 9654 */ 9655 lgrp_id = lgrp_home_id(curthread); 9656 } 9657 /* 9658 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. 
Otherwise 9659 * just set it to NLGRPS_MAX if it's different from current process T1 9660 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9661 * replication and T1 new home is different from lgrp used for text 9662 * replication. When this happens asyncronous segvn thread rechecks if 9663 * segments should change lgrps used for text replication. If we fail 9664 * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas 9665 * if it's not already NLGRPS_MAX and not equal lgrp_id we want to 9666 * use. We don't need to use cas in this case because another thread 9667 * that races in between our non atomic check and set may only change 9668 * p_tr_lgrpid to NLGRPS_MAX at this point. 9669 */ 9670 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9671 olid = p->p_tr_lgrpid; 9672 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9673 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9674 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) { 9675 olid = p->p_tr_lgrpid; 9676 ASSERT(olid != LGRP_NONE); 9677 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9678 p->p_tr_lgrpid = NLGRPS_MAX; 9679 } 9680 } 9681 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9682 membar_producer(); 9683 /* 9684 * lgrp_move_thread() won't schedule async recheck after 9685 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9686 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9687 * is not LGRP_NONE. 9688 */ 9689 if (first && p->p_t1_lgrpid != LGRP_NONE && 9690 p->p_t1_lgrpid != lgrp_id) { 9691 first = 0; 9692 goto again; 9693 } 9694 } 9695 /* 9696 * If no amp was created yet for lgrp_id create a new one as long as 9697 * we have enough memory to afford it. 9698 */ 9699 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9700 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9701 if (trmem > segvn_textrepl_max_bytes) { 9702 SEGVN_TR_ADDSTAT(normem); 9703 goto fail; 9704 } 9705 if (anon_try_resv_zone(size, NULL) == 0) { 9706 SEGVN_TR_ADDSTAT(noanon); 9707 goto fail; 9708 } 9709 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9710 if (amp == NULL) { 9711 anon_unresv_zone(size, NULL); 9712 SEGVN_TR_ADDSTAT(nokmem); 9713 goto fail; 9714 } 9715 ASSERT(amp->refcnt == 1); 9716 amp->a_szc = szc; 9717 svntrp->tr_amp[lgrp_id] = amp; 9718 SEGVN_TR_ADDSTAT(newamp); 9719 } 9720 svntrp->tr_refcnt++; 9721 ASSERT(svd->svn_trnext == NULL); 9722 ASSERT(svd->svn_trprev == NULL); 9723 svd->svn_trnext = svntrp->tr_svnhead; 9724 svd->svn_trprev = NULL; 9725 if (svntrp->tr_svnhead != NULL) { 9726 svntrp->tr_svnhead->svn_trprev = svd; 9727 } 9728 svntrp->tr_svnhead = svd; 9729 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9730 ASSERT(amp->refcnt >= 1); 9731 svd->amp = amp; 9732 svd->anon_index = 0; 9733 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9734 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9735 svd->tr_state = SEGVN_TR_ON; 9736 mutex_exit(&svntr_hashtab[hash].tr_lock); 9737 SEGVN_TR_ADDSTAT(repl); 9738 return; 9739 fail: 9740 ASSERT(segvn_textrepl_bytes >= size); 9741 atomic_add_long(&segvn_textrepl_bytes, -size); 9742 ASSERT(svntrp != NULL); 9743 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9744 if (svntrp->tr_refcnt == 0) { 9745 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9746 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9747 mutex_exit(&svntr_hashtab[hash].tr_lock); 9748 kmem_cache_free(svntr_cache, svntrp); 9749 } else { 9750 mutex_exit(&svntr_hashtab[hash].tr_lock); 9751 } 9752 svd->tr_state = SEGVN_TR_OFF; 9753 } 9754 9755 /* 9756 * Convert 

/*
 * Convert the segment back to a regular vnode mapping segment by unbinding
 * it from its text replication amp. This routine is most typically called
 * when the segment is unmapped but can also be called when the segment no
 * longer qualifies for text replication (e.g. due to protection changes).
 * If unload_unmap is set use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback(). If we are the last user of the svntr entry, free
 * all its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	u_offset_t off = svd->offset;
	size_t size = seg->s_size;
	u_offset_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;
	svntr_t **prv_svntrp;
	lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
	lgrp_id_t i;

	ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
		amp->refcnt = 0;
		anonmap_free(amp);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}

/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (the VVMEXEC flag is set). In this
 * case we need to prevent further use of the existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}

/*
 * Asynchronous text replication worker. Arms a periodic timeout and then
 * loops forever: wait (CPR-safe) on segvn_trasync_sem and re-evaluate
 * replica placement via segvn_trupdate() each time it is posted.
 */
static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);

	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}

/*
 * Snapshot of lgrp_get_trthr_migrations() taken at the last wakeup check;
 * the async thread is only posted when this counter has changed.
 */
static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

/*
 * timeout(9F) handler: post the async thread if any threads of processes
 * using text replication have migrated between lgroups since the last
 * snapshot, then rearm the timeout unless updates have been disabled.
 */
static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}

/*
 * Walk every svntr hash bucket and re-evaluate each segment currently
 * attached to a replication entry.
 */
static void
segvn_trupdate(void)
{
	ulong_t hash;
	svntr_t *svntrp;
	segvn_data_t *svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}
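
/*
 * Re-evaluate a single segment that is currently using text replication.
 * If the process' T1 home lgroup no longer matches the lgroup whose replica
 * the segment is mapped to, switch svd->amp to the replica amp for the new
 * home lgroup (allocating and charging a new one if needed) and unload the
 * existing translations so that subsequent faults are satisfied from the
 * new replica.
 */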
static void
segvn_trupdate_seg(struct seg *seg,
    segvn_data_t *svd,
    svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we are taking the as/seg locks and the
	 * svntr hash lock in the reverse order from the synchronous thread
	 * order.
	 */
	if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't need to drop the bucket lock but here we give other
	 * threads a chance. svntr and svd can't be unlinked as long as the
	 * segment lock is held as a writer and the AS is held as well. After
	 * we retake the bucket lock we'll continue from where we left off.
	 * We'll be able to reach the end of either list since new entries
	 * are always added to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as, &as->a_lock);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}
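
/*
 * For orientation, a rough and purely illustrative sketch of how the text
 * replication structures used above relate to each other (field names as
 * in this file; not a definitive layout):
 *
 *	svntr_hashtab[SVNTR_HASH_FUNC(vp)]
 *	    .tr_lock	protects the bucket and its entries
 *	    .tr_head -> svntr_t (one per vp/off/eoff/szc tuple)
 *			    tr_vp, tr_off, tr_eoff, tr_szc
 *			    tr_mtime, tr_ctime, tr_valid
 *			    tr_refcnt		number of attached segments
 *			    tr_amp[lgrp]	per-lgroup anon_map replica
 *			    tr_svnhead ->	svn_trnext/svn_trprev list of
 *						attached segvn_data_t users
 *			    tr_next ->		next entry in this bucket
 */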