1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2014, Joyent, Inc. All rights reserved. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 /* 40 * VM - shared or copy-on-write from a vnode/anonymous memory. 41 */ 42 43 #include <sys/types.h> 44 #include <sys/param.h> 45 #include <sys/t_lock.h> 46 #include <sys/errno.h> 47 #include <sys/systm.h> 48 #include <sys/mman.h> 49 #include <sys/debug.h> 50 #include <sys/cred.h> 51 #include <sys/vmsystm.h> 52 #include <sys/tuneable.h> 53 #include <sys/bitmap.h> 54 #include <sys/swap.h> 55 #include <sys/kmem.h> 56 #include <sys/sysmacros.h> 57 #include <sys/vtrace.h> 58 #include <sys/cmn_err.h> 59 #include <sys/callb.h> 60 #include <sys/vm.h> 61 #include <sys/dumphdr.h> 62 #include <sys/lgrp.h> 63 64 #include <vm/hat.h> 65 #include <vm/as.h> 66 #include <vm/seg.h> 67 #include <vm/seg_vn.h> 68 #include <vm/pvn.h> 69 #include <vm/anon.h> 70 #include <vm/page.h> 71 #include <vm/vpage.h> 72 #include <sys/proc.h> 73 #include <sys/task.h> 74 #include <sys/project.h> 75 #include <sys/zone.h> 76 #include <sys/shm_impl.h> 77 /* 78 * Private seg op routines. 
79 */ 80 static int segvn_dup(struct seg *seg, struct seg *newseg); 81 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 82 static void segvn_free(struct seg *seg); 83 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 84 caddr_t addr, size_t len, enum fault_type type, 85 enum seg_rw rw); 86 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 87 static int segvn_setprot(struct seg *seg, caddr_t addr, 88 size_t len, uint_t prot); 89 static int segvn_checkprot(struct seg *seg, caddr_t addr, 90 size_t len, uint_t prot); 91 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 92 static size_t segvn_swapout(struct seg *seg); 93 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 94 int attr, uint_t flags); 95 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 96 char *vec); 97 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 98 int attr, int op, ulong_t *lockmap, size_t pos); 99 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 100 uint_t *protv); 101 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 102 static int segvn_gettype(struct seg *seg, caddr_t addr); 103 static int segvn_getvp(struct seg *seg, caddr_t addr, 104 struct vnode **vpp); 105 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 106 uint_t behav); 107 static void segvn_dump(struct seg *seg); 108 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 109 struct page ***ppp, enum lock_type type, enum seg_rw rw); 110 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 111 uint_t szc); 112 static int segvn_getmemid(struct seg *seg, caddr_t addr, 113 memid_t *memidp); 114 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 115 static int segvn_capable(struct seg *seg, segcapability_t capable); 116 117 struct seg_ops segvn_ops = { 118 segvn_dup, 119 segvn_unmap, 120 segvn_free, 121 segvn_fault, 122 segvn_faulta, 123 segvn_setprot, 124 segvn_checkprot, 125 segvn_kluster, 126 segvn_swapout, 127 segvn_sync, 128 segvn_incore, 129 segvn_lockop, 130 segvn_getprot, 131 segvn_getoffset, 132 segvn_gettype, 133 segvn_getvp, 134 segvn_advise, 135 segvn_dump, 136 segvn_pagelock, 137 segvn_setpagesize, 138 segvn_getmemid, 139 segvn_getpolicy, 140 segvn_capable, 141 }; 142 143 /* 144 * Common zfod structures, provided as a shorthand for others to use. 
145 */ 146 static segvn_crargs_t zfod_segvn_crargs = 147 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 148 static segvn_crargs_t kzfod_segvn_crargs = 149 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 150 PROT_ALL & ~PROT_USER); 151 static segvn_crargs_t stack_noexec_crargs = 152 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 153 154 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 155 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 156 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 157 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 158 159 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 160 161 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 162 163 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */ 164 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */ 165 uint_t segvn_pglock_comb_bshift; 166 size_t segvn_pglock_comb_palign; 167 168 static int segvn_concat(struct seg *, struct seg *, int); 169 static int segvn_extend_prev(struct seg *, struct seg *, 170 struct segvn_crargs *, size_t); 171 static int segvn_extend_next(struct seg *, struct seg *, 172 struct segvn_crargs *, size_t); 173 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 174 static void segvn_pagelist_rele(page_t **); 175 static void segvn_setvnode_mpss(vnode_t *); 176 static void segvn_relocate_pages(page_t **, page_t *); 177 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 178 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 179 uint_t, page_t **, page_t **, uint_t *, int *); 180 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 181 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 182 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 183 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 184 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 185 u_offset_t, struct vpage *, page_t **, uint_t, 186 enum fault_type, enum seg_rw, int); 187 static void segvn_vpage(struct seg *); 188 static size_t segvn_count_swap_by_vpages(struct seg *); 189 190 static void segvn_purge(struct seg *seg); 191 static int segvn_reclaim(void *, caddr_t, size_t, struct page **, 192 enum seg_rw, int); 193 static int shamp_reclaim(void *, caddr_t, size_t, struct page **, 194 enum seg_rw, int); 195 196 static int sameprot(struct seg *, caddr_t, size_t); 197 198 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 199 static int segvn_clrszc(struct seg *); 200 static struct seg *segvn_split_seg(struct seg *, caddr_t); 201 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 202 ulong_t, uint_t); 203 204 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 205 size_t, void *, u_offset_t); 206 207 static struct kmem_cache *segvn_cache; 208 static struct kmem_cache **segvn_szc_cache; 209 210 #ifdef VM_STATS 211 static struct segvnvmstats_str { 212 ulong_t fill_vp_pages[31]; 213 ulong_t fltvnpages[49]; 214 ulong_t fullszcpages[10]; 215 ulong_t relocatepages[3]; 216 ulong_t fltanpages[17]; 217 ulong_t pagelock[2]; 218 ulong_t demoterange[3]; 219 } segvnvmstats; 220 #endif /* VM_STATS */ 221 222 #define SDR_RANGE 1 /* demote entire range */ 223 #define SDR_END 2 /* demote non aligned ends only */ 224 225 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 226 if 
((len) != 0) { \ 227 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 228 ASSERT(lpgaddr >= (seg)->s_base); \ 229 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 230 (len)), pgsz); \ 231 ASSERT(lpgeaddr > lpgaddr); \ 232 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 233 } else { \ 234 lpgeaddr = lpgaddr = (addr); \ 235 } \ 236 } 237 238 /*ARGSUSED*/ 239 static int 240 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 241 { 242 struct segvn_data *svd = buf; 243 244 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 245 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 246 svd->svn_trnext = svd->svn_trprev = NULL; 247 return (0); 248 } 249 250 /*ARGSUSED1*/ 251 static void 252 segvn_cache_destructor(void *buf, void *cdrarg) 253 { 254 struct segvn_data *svd = buf; 255 256 rw_destroy(&svd->lock); 257 mutex_destroy(&svd->segfree_syncmtx); 258 } 259 260 /*ARGSUSED*/ 261 static int 262 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 263 { 264 bzero(buf, sizeof (svntr_t)); 265 return (0); 266 } 267 268 /* 269 * Patching this variable to non-zero allows the system to run with 270 * stacks marked as "not executable". It's a bit of a kludge, but is 271 * provided as a tweakable for platforms that export those ABIs 272 * (e.g. sparc V8) that have executable stacks enabled by default. 273 * There are also some restrictions for platforms that don't actually 274 * implement 'noexec' protections. 275 * 276 * Once enabled, the system is (therefore) unable to provide a fully 277 * ABI-compliant execution environment, though practically speaking, 278 * most everything works. The exceptions are generally some interpreters 279 * and debuggers that create executable code on the stack and jump 280 * into it (without explicitly mprotecting the address range to include 281 * PROT_EXEC). 282 * 283 * One important class of applications that are disabled are those 284 * that have been transformed into malicious agents using one of the 285 * numerous "buffer overflow" attacks. See 4007890. 286 */ 287 int noexec_user_stack = 0; 288 int noexec_user_stack_log = 1; 289 290 int segvn_lpg_disable = 0; 291 uint_t segvn_maxpgszc = 0; 292 293 ulong_t segvn_vmpss_clrszc_cnt; 294 ulong_t segvn_vmpss_clrszc_err; 295 ulong_t segvn_fltvnpages_clrszc_cnt; 296 ulong_t segvn_fltvnpages_clrszc_err; 297 ulong_t segvn_setpgsz_align_err; 298 ulong_t segvn_setpgsz_anon_align_err; 299 ulong_t segvn_setpgsz_getattr_err; 300 ulong_t segvn_setpgsz_eof_err; 301 ulong_t segvn_faultvnmpss_align_err1; 302 ulong_t segvn_faultvnmpss_align_err2; 303 ulong_t segvn_faultvnmpss_align_err3; 304 ulong_t segvn_faultvnmpss_align_err4; 305 ulong_t segvn_faultvnmpss_align_err5; 306 ulong_t segvn_vmpss_pageio_deadlk_err; 307 308 int segvn_use_regions = 1; 309 310 /* 311 * Segvn supports text replication optimization for NUMA platforms. Text 312 * replica's are represented by anon maps (amp). There's one amp per text file 313 * region per lgroup. A process chooses the amp for each of its text mappings 314 * based on the lgroup assignment of its main thread (t_tid = 1). All 315 * processes that want a replica on a particular lgroup for the same text file 316 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 317 * with vp,off,size,szc used as a key. Text replication segments are read only 318 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 319 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 320 * pages. 
Replication amp is assigned to a segment when it gets its first 321 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread 322 * rechecks periodically if the process still maps an amp local to the main 323 * thread. If not async thread forces process to remap to an amp in the new 324 * home lgroup of the main thread. Current text replication implementation 325 * only provides the benefit to workloads that do most of their work in the 326 * main thread of a process or all the threads of a process run in the same 327 * lgroup. To extend text replication benefit to different types of 328 * multithreaded workloads further work would be needed in the hat layer to 329 * allow the same virtual address in the same hat to simultaneously map 330 * different physical addresses (i.e. page table replication would be needed 331 * for x86). 332 * 333 * amp pages are used instead of vnode pages as long as segment has a very 334 * simple life cycle. It's created via segvn_create(), handles S_EXEC 335 * (S_READ) pagefaults and is fully unmapped. If anything more complicated 336 * happens such as protection is changed, real COW fault happens, pagesize is 337 * changed, MC_LOCK is requested or segment is partially unmapped we turn off 338 * text replication by converting the segment back to vnode only segment 339 * (unmap segment's address range and set svd->amp to NULL). 340 * 341 * The original file can be changed after amp is inserted into 342 * svntr_hashtab. Processes that are launched after the file is already 343 * changed can't use the replica's created prior to the file change. To 344 * implement this functionality hash entries are timestamped. Replica's can 345 * only be used if current file modification time is the same as the timestamp 346 * saved when hash entry was created. However just timestamps alone are not 347 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We 348 * deal with file changes via MAP_SHARED mappings differently. When writable 349 * MAP_SHARED mappings are created to vnodes marked as executable we mark all 350 * existing replica's for this vnode as not usable for future text 351 * mappings. And we don't create new replica's for files that currently have 352 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is 353 * true). 
354 */ 355 356 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 357 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 358 359 static ulong_t svntr_hashtab_sz = 512; 360 static svntr_bucket_t *svntr_hashtab = NULL; 361 static struct kmem_cache *svntr_cache; 362 static svntr_stats_t *segvn_textrepl_stats; 363 static ksema_t segvn_trasync_sem; 364 365 int segvn_disable_textrepl = 1; 366 size_t textrepl_size_thresh = (size_t)-1; 367 size_t segvn_textrepl_bytes = 0; 368 size_t segvn_textrepl_max_bytes = 0; 369 clock_t segvn_update_textrepl_interval = 0; 370 int segvn_update_tr_time = 10; 371 int segvn_disable_textrepl_update = 0; 372 373 static void segvn_textrepl(struct seg *); 374 static void segvn_textunrepl(struct seg *, int); 375 static void segvn_inval_trcache(vnode_t *); 376 static void segvn_trasync_thread(void); 377 static void segvn_trupdate_wakeup(void *); 378 static void segvn_trupdate(void); 379 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 380 ulong_t); 381 382 /* 383 * Initialize segvn data structures 384 */ 385 void 386 segvn_init(void) 387 { 388 uint_t maxszc; 389 uint_t szc; 390 size_t pgsz; 391 392 segvn_cache = kmem_cache_create("segvn_cache", 393 sizeof (struct segvn_data), 0, 394 segvn_cache_constructor, segvn_cache_destructor, NULL, 395 NULL, NULL, 0); 396 397 if (segvn_lpg_disable == 0) { 398 szc = maxszc = page_num_pagesizes() - 1; 399 if (szc == 0) { 400 segvn_lpg_disable = 1; 401 } 402 if (page_get_pagesize(0) != PAGESIZE) { 403 panic("segvn_init: bad szc 0"); 404 /*NOTREACHED*/ 405 } 406 while (szc != 0) { 407 pgsz = page_get_pagesize(szc); 408 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 409 panic("segvn_init: bad szc %d", szc); 410 /*NOTREACHED*/ 411 } 412 szc--; 413 } 414 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 415 segvn_maxpgszc = maxszc; 416 } 417 418 if (segvn_maxpgszc) { 419 segvn_szc_cache = (struct kmem_cache **)kmem_alloc( 420 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *), 421 KM_SLEEP); 422 } 423 424 for (szc = 1; szc <= segvn_maxpgszc; szc++) { 425 char str[32]; 426 427 (void) sprintf(str, "segvn_szc_cache%d", szc); 428 segvn_szc_cache[szc] = kmem_cache_create(str, 429 page_get_pagecnt(szc) * sizeof (page_t *), 0, 430 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 431 } 432 433 434 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 435 segvn_use_regions = 0; 436 437 /* 438 * For now shared regions and text replication segvn support 439 * are mutually exclusive. This is acceptable because 440 * currently significant benefit from text replication was 441 * only observed on AMD64 NUMA platforms (due to relatively 442 * small L2$ size) and currently we don't support shared 443 * regions on x86. 
444 */ 445 if (segvn_use_regions && !segvn_disable_textrepl) { 446 segvn_disable_textrepl = 1; 447 } 448 449 #if defined(_LP64) 450 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 451 !segvn_disable_textrepl) { 452 ulong_t i; 453 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 454 455 svntr_cache = kmem_cache_create("svntr_cache", 456 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 457 NULL, NULL, NULL, 0); 458 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 459 for (i = 0; i < svntr_hashtab_sz; i++) { 460 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 461 MUTEX_DEFAULT, NULL); 462 } 463 segvn_textrepl_max_bytes = ptob(physmem) / 464 segvn_textrepl_max_bytes_factor; 465 segvn_textrepl_stats = kmem_zalloc(NCPU * 466 sizeof (svntr_stats_t), KM_SLEEP); 467 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 468 (void) thread_create(NULL, 0, segvn_trasync_thread, 469 NULL, 0, &p0, TS_RUN, minclsyspri); 470 } 471 #endif 472 473 if (!ISP2(segvn_pglock_comb_balign) || 474 segvn_pglock_comb_balign < PAGESIZE) { 475 segvn_pglock_comb_balign = 1UL << 16; /* 64K */ 476 } 477 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1; 478 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign); 479 } 480 481 #define SEGVN_PAGEIO ((void *)0x1) 482 #define SEGVN_NOPAGEIO ((void *)0x2) 483 484 static void 485 segvn_setvnode_mpss(vnode_t *vp) 486 { 487 int err; 488 489 ASSERT(vp->v_mpssdata == NULL || 490 vp->v_mpssdata == SEGVN_PAGEIO || 491 vp->v_mpssdata == SEGVN_NOPAGEIO); 492 493 if (vp->v_mpssdata == NULL) { 494 if (vn_vmpss_usepageio(vp)) { 495 err = VOP_PAGEIO(vp, (page_t *)NULL, 496 (u_offset_t)0, 0, 0, CRED(), NULL); 497 } else { 498 err = ENOSYS; 499 } 500 /* 501 * set v_mpssdata just once per vnode life 502 * so that it never changes. 503 */ 504 mutex_enter(&vp->v_lock); 505 if (vp->v_mpssdata == NULL) { 506 if (err == EINVAL) { 507 vp->v_mpssdata = SEGVN_PAGEIO; 508 } else { 509 vp->v_mpssdata = SEGVN_NOPAGEIO; 510 } 511 } 512 mutex_exit(&vp->v_lock); 513 } 514 } 515 516 int 517 segvn_create(struct seg *seg, void *argsp) 518 { 519 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 520 struct segvn_data *svd; 521 size_t swresv = 0; 522 struct cred *cred; 523 struct anon_map *amp; 524 int error = 0; 525 size_t pgsz; 526 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 527 int use_rgn = 0; 528 int trok = 0; 529 530 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 531 532 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 533 panic("segvn_create type"); 534 /*NOTREACHED*/ 535 } 536 537 /* 538 * Check arguments. If a shared anon structure is given then 539 * it is illegal to also specify a vp. 540 */ 541 if (a->amp != NULL && a->vp != NULL) { 542 panic("segvn_create anon_map"); 543 /*NOTREACHED*/ 544 } 545 546 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 547 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 548 segvn_use_regions) { 549 use_rgn = 1; 550 } 551 552 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. 
*/ 553 if (a->type == MAP_SHARED) 554 a->flags &= ~MAP_NORESERVE; 555 556 if (a->szc != 0) { 557 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 558 (a->amp != NULL && a->type == MAP_PRIVATE) || 559 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 560 a->szc = 0; 561 } else { 562 if (a->szc > segvn_maxpgszc) 563 a->szc = segvn_maxpgszc; 564 pgsz = page_get_pagesize(a->szc); 565 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 566 !IS_P2ALIGNED(seg->s_size, pgsz)) { 567 a->szc = 0; 568 } else if (a->vp != NULL) { 569 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 570 /* 571 * paranoid check. 572 * hat_page_demote() is not supported 573 * on swapfs pages. 574 */ 575 a->szc = 0; 576 } else if (map_addr_vacalign_check(seg->s_base, 577 a->offset & PAGEMASK)) { 578 a->szc = 0; 579 } 580 } else if (a->amp != NULL) { 581 pgcnt_t anum = btopr(a->offset); 582 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 583 if (!IS_P2ALIGNED(anum, pgcnt)) { 584 a->szc = 0; 585 } 586 } 587 } 588 } 589 590 /* 591 * If segment may need private pages, reserve them now. 592 */ 593 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 594 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 595 if (anon_resv_zone(seg->s_size, 596 seg->s_as->a_proc->p_zone) == 0) 597 return (EAGAIN); 598 swresv = seg->s_size; 599 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 600 seg, swresv, 1); 601 } 602 603 /* 604 * Reserve any mapping structures that may be required. 605 * 606 * Don't do it for segments that may use regions. It's currently a 607 * noop in the hat implementations anyway. 608 */ 609 if (!use_rgn) { 610 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 611 } 612 613 if (a->cred) { 614 cred = a->cred; 615 crhold(cred); 616 } else { 617 crhold(cred = CRED()); 618 } 619 620 /* Inform the vnode of the new mapping */ 621 if (a->vp != NULL) { 622 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 623 seg->s_as, seg->s_base, seg->s_size, a->prot, 624 a->maxprot, a->type, cred, NULL); 625 if (error) { 626 if (swresv != 0) { 627 anon_unresv_zone(swresv, 628 seg->s_as->a_proc->p_zone); 629 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 630 "anon proc:%p %lu %u", seg, swresv, 0); 631 } 632 crfree(cred); 633 if (!use_rgn) { 634 hat_unload(seg->s_as->a_hat, seg->s_base, 635 seg->s_size, HAT_UNLOAD_UNMAP); 636 } 637 return (error); 638 } 639 /* 640 * svntr_hashtab will be NULL if we support shared regions. 641 */ 642 trok = ((a->flags & MAP_TEXT) && 643 (seg->s_size > textrepl_size_thresh || 644 (a->flags & _MAP_TEXTREPL)) && 645 lgrp_optimizations() && svntr_hashtab != NULL && 646 a->type == MAP_PRIVATE && swresv == 0 && 647 !(a->flags & MAP_NORESERVE) && 648 seg->s_as != &kas && a->vp->v_type == VREG); 649 650 ASSERT(!trok || !use_rgn); 651 } 652 653 /* 654 * MAP_NORESERVE mappings don't count towards the VSZ of a process 655 * until we fault the pages in. 656 */ 657 if ((a->vp == NULL || a->vp->v_type != VREG) && 658 a->flags & MAP_NORESERVE) { 659 seg->s_as->a_resvsize -= seg->s_size; 660 } 661 662 /* 663 * If more than one segment in the address space, and they're adjacent 664 * virtually, try to concatenate them. Don't concatenate if an 665 * explicit anon_map structure was supplied (e.g., SystemV shared 666 * memory) or if we'll use text replication for this segment. 
667 */ 668 if (a->amp == NULL && !use_rgn && !trok) { 669 struct seg *pseg, *nseg; 670 struct segvn_data *psvd, *nsvd; 671 lgrp_mem_policy_t ppolicy, npolicy; 672 uint_t lgrp_mem_policy_flags = 0; 673 extern lgrp_mem_policy_t lgrp_mem_default_policy; 674 675 /* 676 * Memory policy flags (lgrp_mem_policy_flags) is valid when 677 * extending stack/heap segments. 678 */ 679 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 680 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 681 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 682 } else { 683 /* 684 * Get policy when not extending it from another segment 685 */ 686 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 687 } 688 689 /* 690 * First, try to concatenate the previous and new segments 691 */ 692 pseg = AS_SEGPREV(seg->s_as, seg); 693 if (pseg != NULL && 694 pseg->s_base + pseg->s_size == seg->s_base && 695 pseg->s_ops == &segvn_ops) { 696 /* 697 * Get memory allocation policy from previous segment. 698 * When extension is specified (e.g. for heap) apply 699 * this policy to the new segment regardless of the 700 * outcome of segment concatenation. Extension occurs 701 * for non-default policy otherwise default policy is 702 * used and is based on extended segment size. 703 */ 704 psvd = (struct segvn_data *)pseg->s_data; 705 ppolicy = psvd->policy_info.mem_policy; 706 if (lgrp_mem_policy_flags == 707 LGRP_MP_FLAG_EXTEND_UP) { 708 if (ppolicy != lgrp_mem_default_policy) { 709 mpolicy = ppolicy; 710 } else { 711 mpolicy = lgrp_mem_policy_default( 712 pseg->s_size + seg->s_size, 713 a->type); 714 } 715 } 716 717 if (mpolicy == ppolicy && 718 (pseg->s_size + seg->s_size <= 719 segvn_comb_thrshld || psvd->amp == NULL) && 720 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 721 /* 722 * success! now try to concatenate 723 * with following seg 724 */ 725 crfree(cred); 726 nseg = AS_SEGNEXT(pseg->s_as, pseg); 727 if (nseg != NULL && 728 nseg != pseg && 729 nseg->s_ops == &segvn_ops && 730 pseg->s_base + pseg->s_size == 731 nseg->s_base) 732 (void) segvn_concat(pseg, nseg, 0); 733 ASSERT(pseg->s_szc == 0 || 734 (a->szc == pseg->s_szc && 735 IS_P2ALIGNED(pseg->s_base, pgsz) && 736 IS_P2ALIGNED(pseg->s_size, pgsz))); 737 return (0); 738 } 739 } 740 741 /* 742 * Failed, so try to concatenate with following seg 743 */ 744 nseg = AS_SEGNEXT(seg->s_as, seg); 745 if (nseg != NULL && 746 seg->s_base + seg->s_size == nseg->s_base && 747 nseg->s_ops == &segvn_ops) { 748 /* 749 * Get memory allocation policy from next segment. 750 * When extension is specified (e.g. for stack) apply 751 * this policy to the new segment regardless of the 752 * outcome of segment concatenation. Extension occurs 753 * for non-default policy otherwise default policy is 754 * used and is based on extended segment size. 
755 */ 756 nsvd = (struct segvn_data *)nseg->s_data; 757 npolicy = nsvd->policy_info.mem_policy; 758 if (lgrp_mem_policy_flags == 759 LGRP_MP_FLAG_EXTEND_DOWN) { 760 if (npolicy != lgrp_mem_default_policy) { 761 mpolicy = npolicy; 762 } else { 763 mpolicy = lgrp_mem_policy_default( 764 nseg->s_size + seg->s_size, 765 a->type); 766 } 767 } 768 769 if (mpolicy == npolicy && 770 segvn_extend_next(seg, nseg, a, swresv) == 0) { 771 crfree(cred); 772 ASSERT(nseg->s_szc == 0 || 773 (a->szc == nseg->s_szc && 774 IS_P2ALIGNED(nseg->s_base, pgsz) && 775 IS_P2ALIGNED(nseg->s_size, pgsz))); 776 return (0); 777 } 778 } 779 } 780 781 if (a->vp != NULL) { 782 VN_HOLD(a->vp); 783 if (a->type == MAP_SHARED) 784 lgrp_shm_policy_init(NULL, a->vp); 785 } 786 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 787 788 seg->s_ops = &segvn_ops; 789 seg->s_data = (void *)svd; 790 seg->s_szc = a->szc; 791 792 svd->seg = seg; 793 svd->vp = a->vp; 794 /* 795 * Anonymous mappings have no backing file so the offset is meaningless. 796 */ 797 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0; 798 svd->prot = a->prot; 799 svd->maxprot = a->maxprot; 800 svd->pageprot = 0; 801 svd->type = a->type; 802 svd->vpage = NULL; 803 svd->cred = cred; 804 svd->advice = MADV_NORMAL; 805 svd->pageadvice = 0; 806 svd->flags = (ushort_t)a->flags; 807 svd->softlockcnt = 0; 808 svd->softlockcnt_sbase = 0; 809 svd->softlockcnt_send = 0; 810 svd->rcookie = HAT_INVALID_REGION_COOKIE; 811 svd->pageswap = 0; 812 813 if (a->szc != 0 && a->vp != NULL) { 814 segvn_setvnode_mpss(a->vp); 815 } 816 if (svd->type == MAP_SHARED && svd->vp != NULL && 817 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 818 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 819 segvn_inval_trcache(svd->vp); 820 } 821 822 amp = a->amp; 823 if ((svd->amp = amp) == NULL) { 824 svd->anon_index = 0; 825 if (svd->type == MAP_SHARED) { 826 svd->swresv = 0; 827 /* 828 * Shared mappings to a vp need no other setup. 829 * If we have a shared mapping to an anon_map object 830 * which hasn't been allocated yet, allocate the 831 * struct now so that it will be properly shared 832 * by remembering the swap reservation there. 833 */ 834 if (a->vp == NULL) { 835 svd->amp = anonmap_alloc(seg->s_size, swresv, 836 ANON_SLEEP); 837 svd->amp->a_szc = seg->s_szc; 838 } 839 } else { 840 /* 841 * Private mapping (with or without a vp). 842 * Allocate anon_map when needed. 843 */ 844 svd->swresv = swresv; 845 } 846 } else { 847 pgcnt_t anon_num; 848 849 /* 850 * Mapping to an existing anon_map structure without a vp. 851 * For now we will insure that the segment size isn't larger 852 * than the size - offset gives us. Later on we may wish to 853 * have the anon array dynamically allocated itself so that 854 * we don't always have to allocate all the anon pointer slots. 855 * This of course involves adding extra code to check that we 856 * aren't trying to use an anon pointer slot beyond the end 857 * of the currently allocated anon array. 858 */ 859 if ((amp->size - a->offset) < seg->s_size) { 860 panic("segvn_create anon_map size"); 861 /*NOTREACHED*/ 862 } 863 864 anon_num = btopr(a->offset); 865 866 if (a->type == MAP_SHARED) { 867 /* 868 * SHARED mapping to a given anon_map. 869 */ 870 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 871 amp->refcnt++; 872 if (a->szc > amp->a_szc) { 873 amp->a_szc = a->szc; 874 } 875 ANON_LOCK_EXIT(&->a_rwlock); 876 svd->anon_index = anon_num; 877 svd->swresv = 0; 878 } else { 879 /* 880 * PRIVATE mapping to a given anon_map. 
881 * Make sure that all the needed anon 882 * structures are created (so that we will 883 * share the underlying pages if nothing 884 * is written by this mapping) and then 885 * duplicate the anon array as is done 886 * when a privately mapped segment is dup'ed. 887 */ 888 struct anon *ap; 889 caddr_t addr; 890 caddr_t eaddr; 891 ulong_t anon_idx; 892 int hat_flag = HAT_LOAD; 893 894 if (svd->flags & MAP_TEXT) { 895 hat_flag |= HAT_LOAD_TEXT; 896 } 897 898 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 899 svd->amp->a_szc = seg->s_szc; 900 svd->anon_index = 0; 901 svd->swresv = swresv; 902 903 /* 904 * Prevent 2 threads from allocating anon 905 * slots simultaneously. 906 */ 907 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 908 eaddr = seg->s_base + seg->s_size; 909 910 for (anon_idx = anon_num, addr = seg->s_base; 911 addr < eaddr; addr += PAGESIZE, anon_idx++) { 912 page_t *pp; 913 914 if ((ap = anon_get_ptr(amp->ahp, 915 anon_idx)) != NULL) 916 continue; 917 918 /* 919 * Allocate the anon struct now. 920 * Might as well load up translation 921 * to the page while we're at it... 922 */ 923 pp = anon_zero(seg, addr, &ap, cred); 924 if (ap == NULL || pp == NULL) { 925 panic("segvn_create anon_zero"); 926 /*NOTREACHED*/ 927 } 928 929 /* 930 * Re-acquire the anon_map lock and 931 * initialize the anon array entry. 932 */ 933 ASSERT(anon_get_ptr(amp->ahp, 934 anon_idx) == NULL); 935 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 936 ANON_SLEEP); 937 938 ASSERT(seg->s_szc == 0); 939 ASSERT(!IS_VMODSORT(pp->p_vnode)); 940 941 ASSERT(use_rgn == 0); 942 hat_memload(seg->s_as->a_hat, addr, pp, 943 svd->prot & ~PROT_WRITE, hat_flag); 944 945 page_unlock(pp); 946 } 947 ASSERT(seg->s_szc == 0); 948 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 949 0, seg->s_size); 950 ANON_LOCK_EXIT(&->a_rwlock); 951 } 952 } 953 954 /* 955 * Set default memory allocation policy for segment 956 * 957 * Always set policy for private memory at least for initialization 958 * even if this is a shared memory segment 959 */ 960 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 961 962 if (svd->type == MAP_SHARED) 963 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 964 svd->vp, svd->offset, seg->s_size); 965 966 if (use_rgn) { 967 ASSERT(!trok); 968 ASSERT(svd->amp == NULL); 969 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 970 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 971 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 972 HAT_REGION_TEXT); 973 } 974 975 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 976 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 977 978 return (0); 979 } 980 981 /* 982 * Concatenate two existing segments, if possible. 983 * Return 0 on success, -1 if two segments are not compatible 984 * or -2 on memory allocation failure. 
985 * If amp_cat == 1 then try and concat segments with anon maps 986 */ 987 static int 988 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 989 { 990 struct segvn_data *svd1 = seg1->s_data; 991 struct segvn_data *svd2 = seg2->s_data; 992 struct anon_map *amp1 = svd1->amp; 993 struct anon_map *amp2 = svd2->amp; 994 struct vpage *vpage1 = svd1->vpage; 995 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 996 size_t size, nvpsize; 997 pgcnt_t npages1, npages2; 998 999 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 1000 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1001 ASSERT(seg1->s_ops == seg2->s_ops); 1002 1003 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 1004 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1005 return (-1); 1006 } 1007 1008 /* both segments exist, try to merge them */ 1009 #define incompat(x) (svd1->x != svd2->x) 1010 if (incompat(vp) || incompat(maxprot) || 1011 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 1012 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 1013 incompat(type) || incompat(cred) || incompat(flags) || 1014 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 1015 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0) 1016 return (-1); 1017 #undef incompat 1018 1019 /* 1020 * vp == NULL implies zfod, offset doesn't matter 1021 */ 1022 if (svd1->vp != NULL && 1023 svd1->offset + seg1->s_size != svd2->offset) { 1024 return (-1); 1025 } 1026 1027 /* 1028 * Don't concatenate if either segment uses text replication. 1029 */ 1030 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 1031 return (-1); 1032 } 1033 1034 /* 1035 * Fail early if we're not supposed to concatenate 1036 * segments with non NULL amp. 1037 */ 1038 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) { 1039 return (-1); 1040 } 1041 1042 if (svd1->vp == NULL && svd1->type == MAP_SHARED) { 1043 if (amp1 != amp2) { 1044 return (-1); 1045 } 1046 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) != 1047 svd2->anon_index) { 1048 return (-1); 1049 } 1050 ASSERT(amp1 == NULL || amp1->refcnt >= 2); 1051 } 1052 1053 /* 1054 * If either seg has vpages, create a new merged vpage array. 
1055 */ 1056 if (vpage1 != NULL || vpage2 != NULL) { 1057 struct vpage *vp, *evp; 1058 1059 npages1 = seg_pages(seg1); 1060 npages2 = seg_pages(seg2); 1061 nvpsize = vpgtob(npages1 + npages2); 1062 1063 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) { 1064 return (-2); 1065 } 1066 1067 if (vpage1 != NULL) { 1068 bcopy(vpage1, nvpage, vpgtob(npages1)); 1069 } else { 1070 evp = nvpage + npages1; 1071 for (vp = nvpage; vp < evp; vp++) { 1072 VPP_SETPROT(vp, svd1->prot); 1073 VPP_SETADVICE(vp, svd1->advice); 1074 } 1075 } 1076 1077 if (vpage2 != NULL) { 1078 bcopy(vpage2, nvpage + npages1, vpgtob(npages2)); 1079 } else { 1080 evp = nvpage + npages1 + npages2; 1081 for (vp = nvpage + npages1; vp < evp; vp++) { 1082 VPP_SETPROT(vp, svd2->prot); 1083 VPP_SETADVICE(vp, svd2->advice); 1084 } 1085 } 1086 1087 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) { 1088 ASSERT(svd1->swresv == seg1->s_size); 1089 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1090 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1091 evp = nvpage + npages1; 1092 for (vp = nvpage; vp < evp; vp++) { 1093 VPP_SETSWAPRES(vp); 1094 } 1095 } 1096 1097 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) { 1098 ASSERT(svd2->swresv == seg2->s_size); 1099 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1100 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1101 vp = nvpage + npages1; 1102 evp = vp + npages2; 1103 for (; vp < evp; vp++) { 1104 VPP_SETSWAPRES(vp); 1105 } 1106 } 1107 } 1108 ASSERT((vpage1 != NULL || vpage2 != NULL) || 1109 (svd1->pageswap == 0 && svd2->pageswap == 0)); 1110 1111 /* 1112 * If either segment has private pages, create a new merged anon 1113 * array. If mergeing shared anon segments just decrement anon map's 1114 * refcnt. 1115 */ 1116 if (amp1 != NULL && svd1->type == MAP_SHARED) { 1117 ASSERT(amp1 == amp2 && svd1->vp == NULL); 1118 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1119 ASSERT(amp1->refcnt >= 2); 1120 amp1->refcnt--; 1121 ANON_LOCK_EXIT(&1->a_rwlock); 1122 svd2->amp = NULL; 1123 } else if (amp1 != NULL || amp2 != NULL) { 1124 struct anon_hdr *nahp; 1125 struct anon_map *namp = NULL; 1126 size_t asize; 1127 1128 ASSERT(svd1->type == MAP_PRIVATE); 1129 1130 asize = seg1->s_size + seg2->s_size; 1131 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) { 1132 if (nvpage != NULL) { 1133 kmem_free(nvpage, nvpsize); 1134 } 1135 return (-2); 1136 } 1137 if (amp1 != NULL) { 1138 /* 1139 * XXX anon rwlock is not really needed because 1140 * this is a private segment and we are writers. 
1141 */ 1142 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1143 ASSERT(amp1->refcnt == 1); 1144 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1145 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1146 anon_release(nahp, btop(asize)); 1147 ANON_LOCK_EXIT(&1->a_rwlock); 1148 if (nvpage != NULL) { 1149 kmem_free(nvpage, nvpsize); 1150 } 1151 return (-2); 1152 } 1153 } 1154 if (amp2 != NULL) { 1155 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1156 ASSERT(amp2->refcnt == 1); 1157 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1158 nahp, btop(seg1->s_size), btop(seg2->s_size), 1159 ANON_NOSLEEP)) { 1160 anon_release(nahp, btop(asize)); 1161 ANON_LOCK_EXIT(&2->a_rwlock); 1162 if (amp1 != NULL) { 1163 ANON_LOCK_EXIT(&1->a_rwlock); 1164 } 1165 if (nvpage != NULL) { 1166 kmem_free(nvpage, nvpsize); 1167 } 1168 return (-2); 1169 } 1170 } 1171 if (amp1 != NULL) { 1172 namp = amp1; 1173 anon_release(amp1->ahp, btop(amp1->size)); 1174 } 1175 if (amp2 != NULL) { 1176 if (namp == NULL) { 1177 ASSERT(amp1 == NULL); 1178 namp = amp2; 1179 anon_release(amp2->ahp, btop(amp2->size)); 1180 } else { 1181 amp2->refcnt--; 1182 ANON_LOCK_EXIT(&2->a_rwlock); 1183 anonmap_free(amp2); 1184 } 1185 svd2->amp = NULL; /* needed for seg_free */ 1186 } 1187 namp->ahp = nahp; 1188 namp->size = asize; 1189 svd1->amp = namp; 1190 svd1->anon_index = 0; 1191 ANON_LOCK_EXIT(&namp->a_rwlock); 1192 } 1193 /* 1194 * Now free the old vpage structures. 1195 */ 1196 if (nvpage != NULL) { 1197 if (vpage1 != NULL) { 1198 kmem_free(vpage1, vpgtob(npages1)); 1199 } 1200 if (vpage2 != NULL) { 1201 svd2->vpage = NULL; 1202 kmem_free(vpage2, vpgtob(npages2)); 1203 } 1204 if (svd2->pageprot) { 1205 svd1->pageprot = 1; 1206 } 1207 if (svd2->pageadvice) { 1208 svd1->pageadvice = 1; 1209 } 1210 if (svd2->pageswap) { 1211 svd1->pageswap = 1; 1212 } 1213 svd1->vpage = nvpage; 1214 } 1215 1216 /* all looks ok, merge segments */ 1217 svd1->swresv += svd2->swresv; 1218 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1219 size = seg2->s_size; 1220 seg_free(seg2); 1221 seg1->s_size += size; 1222 return (0); 1223 } 1224 1225 /* 1226 * Extend the previous segment (seg1) to include the 1227 * new segment (seg2 + a), if possible. 1228 * Return 0 on success. 1229 */ 1230 static int 1231 segvn_extend_prev(seg1, seg2, a, swresv) 1232 struct seg *seg1, *seg2; 1233 struct segvn_crargs *a; 1234 size_t swresv; 1235 { 1236 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1237 size_t size; 1238 struct anon_map *amp1; 1239 struct vpage *new_vpage; 1240 1241 /* 1242 * We don't need any segment level locks for "segvn" data 1243 * since the address space is "write" locked. 
1244 */ 1245 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1246 1247 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1248 return (-1); 1249 } 1250 1251 /* second segment is new, try to extend first */ 1252 /* XXX - should also check cred */ 1253 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1254 (!svd1->pageprot && (svd1->prot != a->prot)) || 1255 svd1->type != a->type || svd1->flags != a->flags || 1256 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0) 1257 return (-1); 1258 1259 /* vp == NULL implies zfod, offset doesn't matter */ 1260 if (svd1->vp != NULL && 1261 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1262 return (-1); 1263 1264 if (svd1->tr_state != SEGVN_TR_OFF) { 1265 return (-1); 1266 } 1267 1268 amp1 = svd1->amp; 1269 if (amp1) { 1270 pgcnt_t newpgs; 1271 1272 /* 1273 * Segment has private pages, can data structures 1274 * be expanded? 1275 * 1276 * Acquire the anon_map lock to prevent it from changing, 1277 * if it is shared. This ensures that the anon_map 1278 * will not change while a thread which has a read/write 1279 * lock on an address space references it. 1280 * XXX - Don't need the anon_map lock at all if "refcnt" 1281 * is 1. 1282 * 1283 * Can't grow a MAP_SHARED segment with an anonmap because 1284 * there may be existing anon slots where we want to extend 1285 * the segment and we wouldn't know what to do with them 1286 * (e.g., for tmpfs right thing is to just leave them there, 1287 * for /dev/zero they should be cleared out). 1288 */ 1289 if (svd1->type == MAP_SHARED) 1290 return (-1); 1291 1292 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1293 if (amp1->refcnt > 1) { 1294 ANON_LOCK_EXIT(&1->a_rwlock); 1295 return (-1); 1296 } 1297 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1298 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1299 1300 if (newpgs == 0) { 1301 ANON_LOCK_EXIT(&1->a_rwlock); 1302 return (-1); 1303 } 1304 amp1->size = ptob(newpgs); 1305 ANON_LOCK_EXIT(&1->a_rwlock); 1306 } 1307 if (svd1->vpage != NULL) { 1308 struct vpage *vp, *evp; 1309 new_vpage = 1310 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1311 KM_NOSLEEP); 1312 if (new_vpage == NULL) 1313 return (-1); 1314 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1315 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1316 svd1->vpage = new_vpage; 1317 1318 vp = new_vpage + seg_pages(seg1); 1319 evp = vp + seg_pages(seg2); 1320 for (; vp < evp; vp++) 1321 VPP_SETPROT(vp, a->prot); 1322 if (svd1->pageswap && swresv) { 1323 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1324 ASSERT(swresv == seg2->s_size); 1325 vp = new_vpage + seg_pages(seg1); 1326 for (; vp < evp; vp++) { 1327 VPP_SETSWAPRES(vp); 1328 } 1329 } 1330 } 1331 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0); 1332 size = seg2->s_size; 1333 seg_free(seg2); 1334 seg1->s_size += size; 1335 svd1->swresv += swresv; 1336 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1337 svd1->type == MAP_SHARED && svd1->vp != NULL && 1338 (svd1->vp->v_flag & VVMEXEC)) { 1339 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1340 segvn_inval_trcache(svd1->vp); 1341 } 1342 return (0); 1343 } 1344 1345 /* 1346 * Extend the next segment (seg2) to include the 1347 * new segment (seg1 + a), if possible. 1348 * Return 0 on success. 
1349 */ 1350 static int 1351 segvn_extend_next( 1352 struct seg *seg1, 1353 struct seg *seg2, 1354 struct segvn_crargs *a, 1355 size_t swresv) 1356 { 1357 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1358 size_t size; 1359 struct anon_map *amp2; 1360 struct vpage *new_vpage; 1361 1362 /* 1363 * We don't need any segment level locks for "segvn" data 1364 * since the address space is "write" locked. 1365 */ 1366 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1367 1368 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1369 return (-1); 1370 } 1371 1372 /* first segment is new, try to extend second */ 1373 /* XXX - should also check cred */ 1374 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1375 (!svd2->pageprot && (svd2->prot != a->prot)) || 1376 svd2->type != a->type || svd2->flags != a->flags || 1377 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0) 1378 return (-1); 1379 /* vp == NULL implies zfod, offset doesn't matter */ 1380 if (svd2->vp != NULL && 1381 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1382 return (-1); 1383 1384 if (svd2->tr_state != SEGVN_TR_OFF) { 1385 return (-1); 1386 } 1387 1388 amp2 = svd2->amp; 1389 if (amp2) { 1390 pgcnt_t newpgs; 1391 1392 /* 1393 * Segment has private pages, can data structures 1394 * be expanded? 1395 * 1396 * Acquire the anon_map lock to prevent it from changing, 1397 * if it is shared. This ensures that the anon_map 1398 * will not change while a thread which has a read/write 1399 * lock on an address space references it. 1400 * 1401 * XXX - Don't need the anon_map lock at all if "refcnt" 1402 * is 1. 1403 */ 1404 if (svd2->type == MAP_SHARED) 1405 return (-1); 1406 1407 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1408 if (amp2->refcnt > 1) { 1409 ANON_LOCK_EXIT(&2->a_rwlock); 1410 return (-1); 1411 } 1412 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1413 btop(seg2->s_size), btop(seg1->s_size), 1414 ANON_NOSLEEP | ANON_GROWDOWN); 1415 1416 if (newpgs == 0) { 1417 ANON_LOCK_EXIT(&2->a_rwlock); 1418 return (-1); 1419 } 1420 amp2->size = ptob(newpgs); 1421 ANON_LOCK_EXIT(&2->a_rwlock); 1422 } 1423 if (svd2->vpage != NULL) { 1424 struct vpage *vp, *evp; 1425 new_vpage = 1426 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1427 KM_NOSLEEP); 1428 if (new_vpage == NULL) { 1429 /* Not merging segments so adjust anon_index back */ 1430 if (amp2) 1431 svd2->anon_index += seg_pages(seg1); 1432 return (-1); 1433 } 1434 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1435 vpgtob(seg_pages(seg2))); 1436 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1437 svd2->vpage = new_vpage; 1438 1439 vp = new_vpage; 1440 evp = vp + seg_pages(seg1); 1441 for (; vp < evp; vp++) 1442 VPP_SETPROT(vp, a->prot); 1443 if (svd2->pageswap && swresv) { 1444 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1445 ASSERT(swresv == seg1->s_size); 1446 vp = new_vpage; 1447 for (; vp < evp; vp++) { 1448 VPP_SETSWAPRES(vp); 1449 } 1450 } 1451 } 1452 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0); 1453 size = seg1->s_size; 1454 seg_free(seg1); 1455 seg2->s_size += size; 1456 seg2->s_base -= size; 1457 svd2->offset -= size; 1458 svd2->swresv += swresv; 1459 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1460 svd2->type == MAP_SHARED && svd2->vp != NULL && 1461 (svd2->vp->v_flag & VVMEXEC)) { 1462 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1463 segvn_inval_trcache(svd2->vp); 1464 } 1465 return (0); 1466 } 1467 1468 static int 1469 segvn_dup(struct seg *seg, struct seg *newseg) 1470 { 1471 struct segvn_data *svd = (struct 
segvn_data *)seg->s_data; 1472 struct segvn_data *newsvd; 1473 pgcnt_t npages = seg_pages(seg); 1474 int error = 0; 1475 uint_t prot; 1476 size_t len; 1477 struct anon_map *amp; 1478 1479 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1480 ASSERT(newseg->s_as->a_proc->p_parent == curproc); 1481 1482 /* 1483 * If segment has anon reserved, reserve more for the new seg. 1484 * For a MAP_NORESERVE segment swresv will be a count of all the 1485 * allocated anon slots; thus we reserve for the child as many slots 1486 * as the parent has allocated. This semantic prevents the child or 1487 * parent from dieing during a copy-on-write fault caused by trying 1488 * to write a shared pre-existing anon page. 1489 */ 1490 if ((len = svd->swresv) != 0) { 1491 if (anon_resv(svd->swresv) == 0) 1492 return (ENOMEM); 1493 1494 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1495 seg, len, 0); 1496 } 1497 1498 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1499 1500 newseg->s_ops = &segvn_ops; 1501 newseg->s_data = (void *)newsvd; 1502 newseg->s_szc = seg->s_szc; 1503 1504 newsvd->seg = newseg; 1505 if ((newsvd->vp = svd->vp) != NULL) { 1506 VN_HOLD(svd->vp); 1507 if (svd->type == MAP_SHARED) 1508 lgrp_shm_policy_init(NULL, svd->vp); 1509 } 1510 newsvd->offset = svd->offset; 1511 newsvd->prot = svd->prot; 1512 newsvd->maxprot = svd->maxprot; 1513 newsvd->pageprot = svd->pageprot; 1514 newsvd->type = svd->type; 1515 newsvd->cred = svd->cred; 1516 crhold(newsvd->cred); 1517 newsvd->advice = svd->advice; 1518 newsvd->pageadvice = svd->pageadvice; 1519 newsvd->swresv = svd->swresv; 1520 newsvd->pageswap = svd->pageswap; 1521 newsvd->flags = svd->flags; 1522 newsvd->softlockcnt = 0; 1523 newsvd->softlockcnt_sbase = 0; 1524 newsvd->softlockcnt_send = 0; 1525 newsvd->policy_info = svd->policy_info; 1526 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1527 1528 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1529 /* 1530 * Not attaching to a shared anon object. 1531 */ 1532 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1533 svd->tr_state == SEGVN_TR_OFF); 1534 if (svd->tr_state == SEGVN_TR_ON) { 1535 ASSERT(newsvd->vp != NULL && amp != NULL); 1536 newsvd->tr_state = SEGVN_TR_INIT; 1537 } else { 1538 newsvd->tr_state = svd->tr_state; 1539 } 1540 newsvd->amp = NULL; 1541 newsvd->anon_index = 0; 1542 } else { 1543 /* regions for now are only used on pure vnode segments */ 1544 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1545 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1546 newsvd->tr_state = SEGVN_TR_OFF; 1547 if (svd->type == MAP_SHARED) { 1548 newsvd->amp = amp; 1549 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1550 amp->refcnt++; 1551 ANON_LOCK_EXIT(&->a_rwlock); 1552 newsvd->anon_index = svd->anon_index; 1553 } else { 1554 int reclaim = 1; 1555 1556 /* 1557 * Allocate and initialize new anon_map structure. 1558 */ 1559 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1560 ANON_SLEEP); 1561 newsvd->amp->a_szc = newseg->s_szc; 1562 newsvd->anon_index = 0; 1563 1564 /* 1565 * We don't have to acquire the anon_map lock 1566 * for the new segment (since it belongs to an 1567 * address space that is still not associated 1568 * with any process), or the segment in the old 1569 * address space (since all threads in it 1570 * are stopped while duplicating the address space). 1571 */ 1572 1573 /* 1574 * The goal of the following code is to make sure that 1575 * softlocked pages do not end up as copy on write 1576 * pages. 
This would cause problems where one 1577 * thread writes to a page that is COW and a different 1578 * thread in the same process has softlocked it. The 1579 * softlock lock would move away from this process 1580 * because the write would cause this process to get 1581 * a copy (without the softlock). 1582 * 1583 * The strategy here is to just break the 1584 * sharing on pages that could possibly be 1585 * softlocked. 1586 */ 1587 retry: 1588 if (svd->softlockcnt) { 1589 struct anon *ap, *newap; 1590 size_t i; 1591 uint_t vpprot; 1592 page_t *anon_pl[1+1], *pp; 1593 caddr_t addr; 1594 ulong_t old_idx = svd->anon_index; 1595 ulong_t new_idx = 0; 1596 1597 /* 1598 * The softlock count might be non zero 1599 * because some pages are still stuck in the 1600 * cache for lazy reclaim. Flush the cache 1601 * now. This should drop the count to zero. 1602 * [or there is really I/O going on to these 1603 * pages]. Note, we have the writers lock so 1604 * nothing gets inserted during the flush. 1605 */ 1606 if (reclaim == 1) { 1607 segvn_purge(seg); 1608 reclaim = 0; 1609 goto retry; 1610 } 1611 i = btopr(seg->s_size); 1612 addr = seg->s_base; 1613 /* 1614 * XXX break cow sharing using PAGESIZE 1615 * pages. They will be relocated into larger 1616 * pages at fault time. 1617 */ 1618 while (i-- > 0) { 1619 if (ap = anon_get_ptr(amp->ahp, 1620 old_idx)) { 1621 error = anon_getpage(&ap, 1622 &vpprot, anon_pl, PAGESIZE, 1623 seg, addr, S_READ, 1624 svd->cred); 1625 if (error) { 1626 newsvd->vpage = NULL; 1627 goto out; 1628 } 1629 /* 1630 * prot need not be computed 1631 * below 'cause anon_private is 1632 * going to ignore it anyway 1633 * as child doesn't inherit 1634 * pagelock from parent. 1635 */ 1636 prot = svd->pageprot ? 1637 VPP_PROT( 1638 &svd->vpage[ 1639 seg_page(seg, addr)]) 1640 : svd->prot; 1641 pp = anon_private(&newap, 1642 newseg, addr, prot, 1643 anon_pl[0], 0, 1644 newsvd->cred); 1645 if (pp == NULL) { 1646 /* no mem abort */ 1647 newsvd->vpage = NULL; 1648 error = ENOMEM; 1649 goto out; 1650 } 1651 (void) anon_set_ptr( 1652 newsvd->amp->ahp, new_idx, 1653 newap, ANON_SLEEP); 1654 page_unlock(pp); 1655 } 1656 addr += PAGESIZE; 1657 old_idx++; 1658 new_idx++; 1659 } 1660 } else { /* common case */ 1661 if (seg->s_szc != 0) { 1662 /* 1663 * If at least one of anon slots of a 1664 * large page exists then make sure 1665 * all anon slots of a large page 1666 * exist to avoid partial cow sharing 1667 * of a large page in the future. 1668 */ 1669 anon_dup_fill_holes(amp->ahp, 1670 svd->anon_index, newsvd->amp->ahp, 1671 0, seg->s_size, seg->s_szc, 1672 svd->vp != NULL); 1673 } else { 1674 anon_dup(amp->ahp, svd->anon_index, 1675 newsvd->amp->ahp, 0, seg->s_size); 1676 } 1677 1678 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1679 seg->s_size, PROT_WRITE); 1680 } 1681 } 1682 } 1683 /* 1684 * If necessary, create a vpage structure for the new segment. 1685 * Do not copy any page lock indications. 
1686 */ 1687 if (svd->vpage != NULL) { 1688 uint_t i; 1689 struct vpage *ovp = svd->vpage; 1690 struct vpage *nvp; 1691 1692 nvp = newsvd->vpage = 1693 kmem_alloc(vpgtob(npages), KM_SLEEP); 1694 for (i = 0; i < npages; i++) { 1695 *nvp = *ovp++; 1696 VPP_CLRPPLOCK(nvp++); 1697 } 1698 } else 1699 newsvd->vpage = NULL; 1700 1701 /* Inform the vnode of the new mapping */ 1702 if (newsvd->vp != NULL) { 1703 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1704 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1705 newsvd->maxprot, newsvd->type, newsvd->cred, NULL); 1706 } 1707 out: 1708 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1709 ASSERT(newsvd->amp == NULL); 1710 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1711 newsvd->rcookie = svd->rcookie; 1712 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1713 } 1714 return (error); 1715 } 1716 1717 1718 /* 1719 * callback function to invoke free_vp_pages() for only those pages actually 1720 * processed by the HAT when a shared region is destroyed. 1721 */ 1722 extern int free_pages; 1723 1724 static void 1725 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1726 size_t r_size, void *r_obj, u_offset_t r_objoff) 1727 { 1728 u_offset_t off; 1729 size_t len; 1730 vnode_t *vp = (vnode_t *)r_obj; 1731 1732 ASSERT(eaddr > saddr); 1733 ASSERT(saddr >= r_saddr); 1734 ASSERT(saddr < r_saddr + r_size); 1735 ASSERT(eaddr > r_saddr); 1736 ASSERT(eaddr <= r_saddr + r_size); 1737 ASSERT(vp != NULL); 1738 1739 if (!free_pages) { 1740 return; 1741 } 1742 1743 len = eaddr - saddr; 1744 off = (saddr - r_saddr) + r_objoff; 1745 free_vp_pages(vp, off, len); 1746 } 1747 1748 /* 1749 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1750 * those pages actually processed by the HAT 1751 */ 1752 static void 1753 segvn_hat_unload_callback(hat_callback_t *cb) 1754 { 1755 struct seg *seg = cb->hcb_data; 1756 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1757 size_t len; 1758 u_offset_t off; 1759 1760 ASSERT(svd->vp != NULL); 1761 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1762 ASSERT(cb->hcb_start_addr >= seg->s_base); 1763 1764 len = cb->hcb_end_addr - cb->hcb_start_addr; 1765 off = cb->hcb_start_addr - seg->s_base; 1766 free_vp_pages(svd->vp, svd->offset + off, len); 1767 } 1768 1769 /* 1770 * This function determines the number of bytes of swap reserved by 1771 * a segment for which per-page accounting is present. It is used to 1772 * calculate the correct value of a segvn_data's swresv. 
1773 */ 1774 static size_t 1775 segvn_count_swap_by_vpages(struct seg *seg) 1776 { 1777 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1778 struct vpage *vp, *evp; 1779 size_t nswappages = 0; 1780 1781 ASSERT(svd->pageswap); 1782 ASSERT(svd->vpage != NULL); 1783 1784 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 1785 1786 for (vp = svd->vpage; vp < evp; vp++) { 1787 if (VPP_ISSWAPRES(vp)) 1788 nswappages++; 1789 } 1790 1791 return (nswappages << PAGESHIFT); 1792 } 1793 1794 static int 1795 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1796 { 1797 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1798 struct segvn_data *nsvd; 1799 struct seg *nseg; 1800 struct anon_map *amp; 1801 pgcnt_t opages; /* old segment size in pages */ 1802 pgcnt_t npages; /* new segment size in pages */ 1803 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1804 hat_callback_t callback; /* used for free_vp_pages() */ 1805 hat_callback_t *cbp = NULL; 1806 caddr_t nbase; 1807 size_t nsize; 1808 size_t oswresv; 1809 int reclaim = 1; 1810 1811 /* 1812 * We don't need any segment level locks for "segvn" data 1813 * since the address space is "write" locked. 1814 */ 1815 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1816 1817 /* 1818 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1819 * softlockcnt is protected from change by the as write lock. 1820 */ 1821 retry: 1822 if (svd->softlockcnt > 0) { 1823 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1824 1825 /* 1826 * If this is shared segment non 0 softlockcnt 1827 * means locked pages are still in use. 1828 */ 1829 if (svd->type == MAP_SHARED) { 1830 return (EAGAIN); 1831 } 1832 1833 /* 1834 * since we do have the writers lock nobody can fill 1835 * the cache during the purge. The flush either succeeds 1836 * or we still have pending I/Os. 1837 */ 1838 if (reclaim == 1) { 1839 segvn_purge(seg); 1840 reclaim = 0; 1841 goto retry; 1842 } 1843 return (EAGAIN); 1844 } 1845 1846 /* 1847 * Check for bad sizes 1848 */ 1849 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1850 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1851 panic("segvn_unmap"); 1852 /*NOTREACHED*/ 1853 } 1854 1855 if (seg->s_szc != 0) { 1856 size_t pgsz = page_get_pagesize(seg->s_szc); 1857 int err; 1858 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1859 ASSERT(seg->s_base != addr || seg->s_size != len); 1860 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1861 ASSERT(svd->amp == NULL); 1862 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1863 hat_leave_region(seg->s_as->a_hat, 1864 svd->rcookie, HAT_REGION_TEXT); 1865 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1866 /* 1867 * could pass a flag to segvn_demote_range() 1868 * below to tell it not to do any unloads but 1869 * this case is rare enough to not bother for 1870 * now. 1871 */ 1872 } else if (svd->tr_state == SEGVN_TR_INIT) { 1873 svd->tr_state = SEGVN_TR_OFF; 1874 } else if (svd->tr_state == SEGVN_TR_ON) { 1875 ASSERT(svd->amp != NULL); 1876 segvn_textunrepl(seg, 1); 1877 ASSERT(svd->amp == NULL); 1878 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1879 } 1880 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1881 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1882 if (err == 0) { 1883 return (IE_RETRY); 1884 } 1885 return (err); 1886 } 1887 } 1888 1889 /* Inform the vnode of the unmapping. 
*/ 1890 if (svd->vp) { 1891 int error; 1892 1893 error = VOP_DELMAP(svd->vp, 1894 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1895 seg->s_as, addr, len, svd->prot, svd->maxprot, 1896 svd->type, svd->cred, NULL); 1897 1898 if (error == EAGAIN) 1899 return (error); 1900 } 1901 1902 /* 1903 * Remove any page locks set through this mapping. 1904 * If text replication is not off no page locks could have been 1905 * established via this mapping. 1906 */ 1907 if (svd->tr_state == SEGVN_TR_OFF) { 1908 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1909 } 1910 1911 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1912 ASSERT(svd->amp == NULL); 1913 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1914 ASSERT(svd->type == MAP_PRIVATE); 1915 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1916 HAT_REGION_TEXT); 1917 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1918 } else if (svd->tr_state == SEGVN_TR_ON) { 1919 ASSERT(svd->amp != NULL); 1920 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1921 segvn_textunrepl(seg, 1); 1922 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1923 } else { 1924 if (svd->tr_state != SEGVN_TR_OFF) { 1925 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1926 svd->tr_state = SEGVN_TR_OFF; 1927 } 1928 /* 1929 * Unload any hardware translations in the range to be taken 1930 * out. Use a callback to invoke free_vp_pages() effectively. 1931 */ 1932 if (svd->vp != NULL && free_pages != 0) { 1933 callback.hcb_data = seg; 1934 callback.hcb_function = segvn_hat_unload_callback; 1935 cbp = &callback; 1936 } 1937 hat_unload_callback(seg->s_as->a_hat, addr, len, 1938 HAT_UNLOAD_UNMAP, cbp); 1939 1940 if (svd->type == MAP_SHARED && svd->vp != NULL && 1941 (svd->vp->v_flag & VVMEXEC) && 1942 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1943 segvn_inval_trcache(svd->vp); 1944 } 1945 } 1946 1947 /* 1948 * Check for entire segment 1949 */ 1950 if (addr == seg->s_base && len == seg->s_size) { 1951 seg_free(seg); 1952 return (0); 1953 } 1954 1955 opages = seg_pages(seg); 1956 dpages = btop(len); 1957 npages = opages - dpages; 1958 amp = svd->amp; 1959 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1960 1961 /* 1962 * Check for beginning of segment 1963 */ 1964 if (addr == seg->s_base) { 1965 if (svd->vpage != NULL) { 1966 size_t nbytes; 1967 struct vpage *ovpage; 1968 1969 ovpage = svd->vpage; /* keep pointer to vpage */ 1970 1971 nbytes = vpgtob(npages); 1972 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1973 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1974 1975 /* free up old vpage */ 1976 kmem_free(ovpage, vpgtob(opages)); 1977 } 1978 if (amp != NULL) { 1979 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1980 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1981 /* 1982 * Shared anon map is no longer in use. Before 1983 * freeing its pages purge all entries from 1984 * pcache that belong to this amp. 1985 */ 1986 if (svd->type == MAP_SHARED) { 1987 ASSERT(amp->refcnt == 1); 1988 ASSERT(svd->softlockcnt == 0); 1989 anonmap_purge(amp); 1990 } 1991 /* 1992 * Free up now unused parts of anon_map array. 
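* Large-page slots (seg->s_szc != 0) are released with anon_free_pages()
* and order-0 slots with anon_free(); an amp whose page size exceeds the
* segment's can only belong to a MAP_SHARED mapping and is handled by
* anon_shmap_free_pages() below.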
1993 */ 1994 if (amp->a_szc == seg->s_szc) { 1995 if (seg->s_szc != 0) { 1996 anon_free_pages(amp->ahp, 1997 svd->anon_index, len, 1998 seg->s_szc); 1999 } else { 2000 anon_free(amp->ahp, 2001 svd->anon_index, 2002 len); 2003 } 2004 } else { 2005 ASSERT(svd->type == MAP_SHARED); 2006 ASSERT(amp->a_szc > seg->s_szc); 2007 anon_shmap_free_pages(amp, 2008 svd->anon_index, len); 2009 } 2010 2011 /* 2012 * Unreserve swap space for the 2013 * unmapped chunk of this segment in 2014 * case it's MAP_SHARED 2015 */ 2016 if (svd->type == MAP_SHARED) { 2017 anon_unresv_zone(len, 2018 seg->s_as->a_proc->p_zone); 2019 amp->swresv -= len; 2020 } 2021 } 2022 ANON_LOCK_EXIT(&->a_rwlock); 2023 svd->anon_index += dpages; 2024 } 2025 if (svd->vp != NULL) 2026 svd->offset += len; 2027 2028 seg->s_base += len; 2029 seg->s_size -= len; 2030 2031 if (svd->swresv) { 2032 if (svd->flags & MAP_NORESERVE) { 2033 ASSERT(amp); 2034 oswresv = svd->swresv; 2035 2036 svd->swresv = ptob(anon_pages(amp->ahp, 2037 svd->anon_index, npages)); 2038 anon_unresv_zone(oswresv - svd->swresv, 2039 seg->s_as->a_proc->p_zone); 2040 if (SEG_IS_PARTIAL_RESV(seg)) 2041 seg->s_as->a_resvsize -= oswresv - 2042 svd->swresv; 2043 } else { 2044 size_t unlen; 2045 2046 if (svd->pageswap) { 2047 oswresv = svd->swresv; 2048 svd->swresv = 2049 segvn_count_swap_by_vpages(seg); 2050 ASSERT(oswresv >= svd->swresv); 2051 unlen = oswresv - svd->swresv; 2052 } else { 2053 svd->swresv -= len; 2054 ASSERT(svd->swresv == seg->s_size); 2055 unlen = len; 2056 } 2057 anon_unresv_zone(unlen, 2058 seg->s_as->a_proc->p_zone); 2059 } 2060 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2061 seg, len, 0); 2062 } 2063 2064 return (0); 2065 } 2066 2067 /* 2068 * Check for end of segment 2069 */ 2070 if (addr + len == seg->s_base + seg->s_size) { 2071 if (svd->vpage != NULL) { 2072 size_t nbytes; 2073 struct vpage *ovpage; 2074 2075 ovpage = svd->vpage; /* keep pointer to vpage */ 2076 2077 nbytes = vpgtob(npages); 2078 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2079 bcopy(ovpage, svd->vpage, nbytes); 2080 2081 /* free up old vpage */ 2082 kmem_free(ovpage, vpgtob(opages)); 2083 2084 } 2085 if (amp != NULL) { 2086 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2087 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2088 /* 2089 * Free up now unused parts of anon_map array. 2090 */ 2091 ulong_t an_idx = svd->anon_index + npages; 2092 2093 /* 2094 * Shared anon map is no longer in use. Before 2095 * freeing its pages purge all entries from 2096 * pcache that belong to this amp. 
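* The asserts below check that this is the last reference to the amp and
* that no pages are softlocked through this segment before the purge.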
2097 */ 2098 if (svd->type == MAP_SHARED) { 2099 ASSERT(amp->refcnt == 1); 2100 ASSERT(svd->softlockcnt == 0); 2101 anonmap_purge(amp); 2102 } 2103 2104 if (amp->a_szc == seg->s_szc) { 2105 if (seg->s_szc != 0) { 2106 anon_free_pages(amp->ahp, 2107 an_idx, len, 2108 seg->s_szc); 2109 } else { 2110 anon_free(amp->ahp, an_idx, 2111 len); 2112 } 2113 } else { 2114 ASSERT(svd->type == MAP_SHARED); 2115 ASSERT(amp->a_szc > seg->s_szc); 2116 anon_shmap_free_pages(amp, 2117 an_idx, len); 2118 } 2119 2120 /* 2121 * Unreserve swap space for the 2122 * unmapped chunk of this segment in 2123 * case it's MAP_SHARED 2124 */ 2125 if (svd->type == MAP_SHARED) { 2126 anon_unresv_zone(len, 2127 seg->s_as->a_proc->p_zone); 2128 amp->swresv -= len; 2129 } 2130 } 2131 ANON_LOCK_EXIT(&->a_rwlock); 2132 } 2133 2134 seg->s_size -= len; 2135 2136 if (svd->swresv) { 2137 if (svd->flags & MAP_NORESERVE) { 2138 ASSERT(amp); 2139 oswresv = svd->swresv; 2140 svd->swresv = ptob(anon_pages(amp->ahp, 2141 svd->anon_index, npages)); 2142 anon_unresv_zone(oswresv - svd->swresv, 2143 seg->s_as->a_proc->p_zone); 2144 if (SEG_IS_PARTIAL_RESV(seg)) 2145 seg->s_as->a_resvsize -= oswresv - 2146 svd->swresv; 2147 } else { 2148 size_t unlen; 2149 2150 if (svd->pageswap) { 2151 oswresv = svd->swresv; 2152 svd->swresv = 2153 segvn_count_swap_by_vpages(seg); 2154 ASSERT(oswresv >= svd->swresv); 2155 unlen = oswresv - svd->swresv; 2156 } else { 2157 svd->swresv -= len; 2158 ASSERT(svd->swresv == seg->s_size); 2159 unlen = len; 2160 } 2161 anon_unresv_zone(unlen, 2162 seg->s_as->a_proc->p_zone); 2163 } 2164 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2165 "anon proc:%p %lu %u", seg, len, 0); 2166 } 2167 2168 return (0); 2169 } 2170 2171 /* 2172 * The section to go is in the middle of the segment, 2173 * have to make it into two segments. nseg is made for 2174 * the high end while seg is cut down at the low end. 
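*
* Sketch of the split performed below (illustration only):
*
*	[ seg->s_base .. addr )		stays in seg (shrunk in place)
*	[ addr .. addr + len )		unmapped
*	[ addr + len .. old end )	becomes nseg/nsvd
*
* where nbase = addr + len and nsize = (old s_base + old s_size) - nbase.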
2175 */ 2176 nbase = addr + len; /* new seg base */ 2177 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2178 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2179 nseg = seg_alloc(seg->s_as, nbase, nsize); 2180 if (nseg == NULL) { 2181 panic("segvn_unmap seg_alloc"); 2182 /*NOTREACHED*/ 2183 } 2184 nseg->s_ops = seg->s_ops; 2185 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2186 nseg->s_data = (void *)nsvd; 2187 nseg->s_szc = seg->s_szc; 2188 *nsvd = *svd; 2189 nsvd->seg = nseg; 2190 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2191 nsvd->swresv = 0; 2192 nsvd->softlockcnt = 0; 2193 nsvd->softlockcnt_sbase = 0; 2194 nsvd->softlockcnt_send = 0; 2195 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2196 2197 if (svd->vp != NULL) { 2198 VN_HOLD(nsvd->vp); 2199 if (nsvd->type == MAP_SHARED) 2200 lgrp_shm_policy_init(NULL, nsvd->vp); 2201 } 2202 crhold(svd->cred); 2203 2204 if (svd->vpage == NULL) { 2205 nsvd->vpage = NULL; 2206 } else { 2207 /* need to split vpage into two arrays */ 2208 size_t nbytes; 2209 struct vpage *ovpage; 2210 2211 ovpage = svd->vpage; /* keep pointer to vpage */ 2212 2213 npages = seg_pages(seg); /* seg has shrunk */ 2214 nbytes = vpgtob(npages); 2215 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2216 2217 bcopy(ovpage, svd->vpage, nbytes); 2218 2219 npages = seg_pages(nseg); 2220 nbytes = vpgtob(npages); 2221 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2222 2223 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2224 2225 /* free up old vpage */ 2226 kmem_free(ovpage, vpgtob(opages)); 2227 } 2228 2229 if (amp == NULL) { 2230 nsvd->amp = NULL; 2231 nsvd->anon_index = 0; 2232 } else { 2233 /* 2234 * Need to create a new anon map for the new segment. 2235 * We'll also allocate a new smaller array for the old 2236 * smaller segment to save space. 2237 */ 2238 opages = btop((uintptr_t)(addr - seg->s_base)); 2239 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2240 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2241 /* 2242 * Free up now unused parts of anon_map array. 2243 */ 2244 ulong_t an_idx = svd->anon_index + opages; 2245 2246 /* 2247 * Shared anon map is no longer in use. Before 2248 * freeing its pages purge all entries from 2249 * pcache that belong to this amp.
2250 */ 2251 if (svd->type == MAP_SHARED) { 2252 ASSERT(amp->refcnt == 1); 2253 ASSERT(svd->softlockcnt == 0); 2254 anonmap_purge(amp); 2255 } 2256 2257 if (amp->a_szc == seg->s_szc) { 2258 if (seg->s_szc != 0) { 2259 anon_free_pages(amp->ahp, an_idx, len, 2260 seg->s_szc); 2261 } else { 2262 anon_free(amp->ahp, an_idx, 2263 len); 2264 } 2265 } else { 2266 ASSERT(svd->type == MAP_SHARED); 2267 ASSERT(amp->a_szc > seg->s_szc); 2268 anon_shmap_free_pages(amp, an_idx, len); 2269 } 2270 2271 /* 2272 * Unreserve swap space for the 2273 * unmapped chunk of this segment in 2274 * case it's MAP_SHARED 2275 */ 2276 if (svd->type == MAP_SHARED) { 2277 anon_unresv_zone(len, 2278 seg->s_as->a_proc->p_zone); 2279 amp->swresv -= len; 2280 } 2281 } 2282 nsvd->anon_index = svd->anon_index + 2283 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2284 if (svd->type == MAP_SHARED) { 2285 amp->refcnt++; 2286 nsvd->amp = amp; 2287 } else { 2288 struct anon_map *namp; 2289 struct anon_hdr *nahp; 2290 2291 ASSERT(svd->type == MAP_PRIVATE); 2292 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2293 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2294 namp->a_szc = seg->s_szc; 2295 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2296 0, btop(seg->s_size), ANON_SLEEP); 2297 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2298 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2299 anon_release(amp->ahp, btop(amp->size)); 2300 svd->anon_index = 0; 2301 nsvd->anon_index = 0; 2302 amp->ahp = nahp; 2303 amp->size = seg->s_size; 2304 nsvd->amp = namp; 2305 } 2306 ANON_LOCK_EXIT(&->a_rwlock); 2307 } 2308 if (svd->swresv) { 2309 if (svd->flags & MAP_NORESERVE) { 2310 ASSERT(amp); 2311 oswresv = svd->swresv; 2312 svd->swresv = ptob(anon_pages(amp->ahp, 2313 svd->anon_index, btop(seg->s_size))); 2314 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2315 nsvd->anon_index, btop(nseg->s_size))); 2316 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2317 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2318 seg->s_as->a_proc->p_zone); 2319 if (SEG_IS_PARTIAL_RESV(seg)) 2320 seg->s_as->a_resvsize -= oswresv - 2321 (svd->swresv + nsvd->swresv); 2322 } else { 2323 size_t unlen; 2324 2325 if (svd->pageswap) { 2326 oswresv = svd->swresv; 2327 svd->swresv = segvn_count_swap_by_vpages(seg); 2328 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2329 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2330 unlen = oswresv - (svd->swresv + nsvd->swresv); 2331 } else { 2332 if (seg->s_size + nseg->s_size + len != 2333 svd->swresv) { 2334 panic("segvn_unmap: cannot split " 2335 "swap reservation"); 2336 /*NOTREACHED*/ 2337 } 2338 svd->swresv = seg->s_size; 2339 nsvd->swresv = nseg->s_size; 2340 unlen = len; 2341 } 2342 anon_unresv_zone(unlen, 2343 seg->s_as->a_proc->p_zone); 2344 } 2345 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2346 seg, len, 0); 2347 } 2348 2349 return (0); /* I'm glad that's all over with! */ 2350 } 2351 2352 static void 2353 segvn_free(struct seg *seg) 2354 { 2355 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2356 pgcnt_t npages = seg_pages(seg); 2357 struct anon_map *amp; 2358 size_t len; 2359 2360 /* 2361 * We don't need any segment level locks for "segvn" data 2362 * since the address space is "write" locked. 2363 */ 2364 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2365 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2366 2367 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2368 2369 /* 2370 * Be sure to unlock pages. 
XXX Why do things get free'ed instead 2371 * of unmapped? XXX 2372 */ 2373 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2374 0, MC_UNLOCK, NULL, 0); 2375 2376 /* 2377 * Deallocate the vpage and anon pointers if necessary and possible. 2378 */ 2379 if (svd->vpage != NULL) { 2380 kmem_free(svd->vpage, vpgtob(npages)); 2381 svd->vpage = NULL; 2382 } 2383 if ((amp = svd->amp) != NULL) { 2384 /* 2385 * If there are no more references to this anon_map 2386 * structure, then deallocate the structure after freeing 2387 * up all the anon slot pointers that we can. 2388 */ 2389 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2390 ASSERT(amp->a_szc >= seg->s_szc); 2391 if (--amp->refcnt == 0) { 2392 if (svd->type == MAP_PRIVATE) { 2393 /* 2394 * Private - we only need to anon_free 2395 * the part that this segment refers to. 2396 */ 2397 if (seg->s_szc != 0) { 2398 anon_free_pages(amp->ahp, 2399 svd->anon_index, seg->s_size, 2400 seg->s_szc); 2401 } else { 2402 anon_free(amp->ahp, svd->anon_index, 2403 seg->s_size); 2404 } 2405 } else { 2406 2407 /* 2408 * Shared anon map is no longer in use. Before 2409 * freeing its pages purge all entries from 2410 * pcache that belong to this amp. 2411 */ 2412 ASSERT(svd->softlockcnt == 0); 2413 anonmap_purge(amp); 2414 2415 /* 2416 * Shared - anon_free the entire 2417 * anon_map's worth of stuff and 2418 * release any swap reservation. 2419 */ 2420 if (amp->a_szc != 0) { 2421 anon_shmap_free_pages(amp, 0, 2422 amp->size); 2423 } else { 2424 anon_free(amp->ahp, 0, amp->size); 2425 } 2426 if ((len = amp->swresv) != 0) { 2427 anon_unresv_zone(len, 2428 seg->s_as->a_proc->p_zone); 2429 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2430 "anon proc:%p %lu %u", seg, len, 0); 2431 } 2432 } 2433 svd->amp = NULL; 2434 ANON_LOCK_EXIT(&->a_rwlock); 2435 anonmap_free(amp); 2436 } else if (svd->type == MAP_PRIVATE) { 2437 /* 2438 * We had a private mapping which still has 2439 * a held anon_map so just free up all the 2440 * anon slot pointers that we were using. 2441 */ 2442 if (seg->s_szc != 0) { 2443 anon_free_pages(amp->ahp, svd->anon_index, 2444 seg->s_size, seg->s_szc); 2445 } else { 2446 anon_free(amp->ahp, svd->anon_index, 2447 seg->s_size); 2448 } 2449 ANON_LOCK_EXIT(&->a_rwlock); 2450 } else { 2451 ANON_LOCK_EXIT(&->a_rwlock); 2452 } 2453 } 2454 2455 /* 2456 * Release swap reservation. 2457 */ 2458 if ((len = svd->swresv) != 0) { 2459 anon_unresv_zone(svd->swresv, 2460 seg->s_as->a_proc->p_zone); 2461 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2462 seg, len, 0); 2463 if (SEG_IS_PARTIAL_RESV(seg)) 2464 seg->s_as->a_resvsize -= svd->swresv; 2465 svd->swresv = 0; 2466 } 2467 /* 2468 * Release claim on vnode, credentials, and finally free the 2469 * private data. 2470 */ 2471 if (svd->vp != NULL) { 2472 if (svd->type == MAP_SHARED) 2473 lgrp_shm_policy_fini(NULL, svd->vp); 2474 VN_RELE(svd->vp); 2475 svd->vp = NULL; 2476 } 2477 crfree(svd->cred); 2478 svd->pageprot = 0; 2479 svd->pageadvice = 0; 2480 svd->pageswap = 0; 2481 svd->cred = NULL; 2482 2483 /* 2484 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2485 * still working with this segment without holding as lock (in case 2486 * it's called by pcache async thread). 2487 */ 2488 ASSERT(svd->softlockcnt == 0); 2489 mutex_enter(&svd->segfree_syncmtx); 2490 mutex_exit(&svd->segfree_syncmtx); 2491 2492 seg->s_data = NULL; 2493 kmem_cache_free(segvn_cache, svd); 2494 } 2495 2496 /* 2497 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2498 * already been F_SOFTLOCK'ed. 
2499 * Caller must always match addr and len of a softunlock with a previous 2500 * softlock with exactly the same addr and len. 2501 */ 2502 static void 2503 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2504 { 2505 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2506 page_t *pp; 2507 caddr_t adr; 2508 struct vnode *vp; 2509 u_offset_t offset; 2510 ulong_t anon_index; 2511 struct anon_map *amp; 2512 struct anon *ap = NULL; 2513 2514 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2515 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2516 2517 if ((amp = svd->amp) != NULL) 2518 anon_index = svd->anon_index + seg_page(seg, addr); 2519 2520 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2521 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2522 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2523 } else { 2524 hat_unlock(seg->s_as->a_hat, addr, len); 2525 } 2526 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2527 if (amp != NULL) { 2528 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2529 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2530 != NULL) { 2531 swap_xlate(ap, &vp, &offset); 2532 } else { 2533 vp = svd->vp; 2534 offset = svd->offset + 2535 (uintptr_t)(adr - seg->s_base); 2536 } 2537 ANON_LOCK_EXIT(&->a_rwlock); 2538 } else { 2539 vp = svd->vp; 2540 offset = svd->offset + 2541 (uintptr_t)(adr - seg->s_base); 2542 } 2543 2544 /* 2545 * Use page_find() instead of page_lookup() to 2546 * find the page since we know that it is locked. 2547 */ 2548 pp = page_find(vp, offset); 2549 if (pp == NULL) { 2550 panic( 2551 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2552 (void *)adr, (void *)ap, (void *)vp, offset); 2553 /*NOTREACHED*/ 2554 } 2555 2556 if (rw == S_WRITE) { 2557 hat_setrefmod(pp); 2558 if (seg->s_as->a_vbits) 2559 hat_setstat(seg->s_as, adr, PAGESIZE, 2560 P_REF | P_MOD); 2561 } else if (rw != S_OTHER) { 2562 hat_setref(pp); 2563 if (seg->s_as->a_vbits) 2564 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2565 } 2566 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2567 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2568 page_unlock(pp); 2569 } 2570 ASSERT(svd->softlockcnt >= btop(len)); 2571 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2572 /* 2573 * All SOFTLOCKS are gone. Wakeup any waiting 2574 * unmappers so they can try again to unmap. 2575 * Check for waiters first without the mutex 2576 * held so we don't always grab the mutex on 2577 * softunlocks. 2578 */ 2579 if (AS_ISUNMAPWAIT(seg->s_as)) { 2580 mutex_enter(&seg->s_as->a_contents); 2581 if (AS_ISUNMAPWAIT(seg->s_as)) { 2582 AS_CLRUNMAPWAIT(seg->s_as); 2583 cv_broadcast(&seg->s_as->a_cv); 2584 } 2585 mutex_exit(&seg->s_as->a_contents); 2586 } 2587 } 2588 } 2589 2590 #define PAGE_HANDLED ((page_t *)-1) 2591 2592 /* 2593 * Release all the pages in the NULL terminated ppp list 2594 * which haven't already been converted to PAGE_HANDLED. 2595 */ 2596 static void 2597 segvn_pagelist_rele(page_t **ppp) 2598 { 2599 for (; *ppp != NULL; ppp++) { 2600 if (*ppp != PAGE_HANDLED) 2601 page_unlock(*ppp); 2602 } 2603 } 2604 2605 static int stealcow = 1; 2606 2607 /* 2608 * Workaround for viking chip bug. See bug id 1220902. 2609 * To fix this down in pagefault() would require importing so 2610 * much as and segvn code as to be unmaintainable. 2611 */ 2612 int enable_mbit_wa = 0; 2613 2614 /* 2615 * Handles all the dirty work of getting the right 2616 * anonymous pages and loading up the translations. 
2617 * This routine is called only from segvn_fault() 2618 * when looping over the range of addresses requested. 2619 * 2620 * The basic algorithm here is: 2621 * If this is an anon_zero case 2622 * Call anon_zero to allocate page 2623 * Load up translation 2624 * Return 2625 * endif 2626 * If this is an anon page 2627 * Use anon_getpage to get the page 2628 * else 2629 * Find page in pl[] list passed in 2630 * endif 2631 * If not a cow 2632 * Load up the translation to the page 2633 * return 2634 * endif 2635 * Call anon_private to handle cow 2636 * Load up (writable) translation to new page 2637 */ 2638 static faultcode_t 2639 segvn_faultpage( 2640 struct hat *hat, /* the hat to use for mapping */ 2641 struct seg *seg, /* seg_vn of interest */ 2642 caddr_t addr, /* address in as */ 2643 u_offset_t off, /* offset in vp */ 2644 struct vpage *vpage, /* pointer to vpage for vp, off */ 2645 page_t *pl[], /* object source page pointer */ 2646 uint_t vpprot, /* access allowed to object pages */ 2647 enum fault_type type, /* type of fault */ 2648 enum seg_rw rw, /* type of access at fault */ 2649 int brkcow) /* we may need to break cow */ 2650 { 2651 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2652 page_t *pp, **ppp; 2653 uint_t pageflags = 0; 2654 page_t *anon_pl[1 + 1]; 2655 page_t *opp = NULL; /* original page */ 2656 uint_t prot; 2657 int err; 2658 int cow; 2659 int claim; 2660 int steal = 0; 2661 ulong_t anon_index; 2662 struct anon *ap, *oldap; 2663 struct anon_map *amp; 2664 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2665 int anon_lock = 0; 2666 anon_sync_obj_t cookie; 2667 2668 if (svd->flags & MAP_TEXT) { 2669 hat_flag |= HAT_LOAD_TEXT; 2670 } 2671 2672 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2673 ASSERT(seg->s_szc == 0); 2674 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2675 2676 /* 2677 * Initialize protection value for this page. 2678 * If we have per page protection values check it now. 2679 */ 2680 if (svd->pageprot) { 2681 uint_t protchk; 2682 2683 switch (rw) { 2684 case S_READ: 2685 protchk = PROT_READ; 2686 break; 2687 case S_WRITE: 2688 protchk = PROT_WRITE; 2689 break; 2690 case S_EXEC: 2691 protchk = PROT_EXEC; 2692 break; 2693 case S_OTHER: 2694 default: 2695 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2696 break; 2697 } 2698 2699 prot = VPP_PROT(vpage); 2700 if ((prot & protchk) == 0) 2701 return (FC_PROT); /* illegal access type */ 2702 } else { 2703 prot = svd->prot; 2704 } 2705 2706 if (type == F_SOFTLOCK) { 2707 atomic_inc_ulong((ulong_t *)&svd->softlockcnt); 2708 } 2709 2710 /* 2711 * Always acquire the anon array lock to prevent 2 threads from 2712 * allocating separate anon slots for the same "addr". 2713 */ 2714 2715 if ((amp = svd->amp) != NULL) { 2716 ASSERT(RW_READ_HELD(&->a_rwlock)); 2717 anon_index = svd->anon_index + seg_page(seg, addr); 2718 anon_array_enter(amp, anon_index, &cookie); 2719 anon_lock = 1; 2720 } 2721 2722 if (svd->vp == NULL && amp != NULL) { 2723 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2724 /* 2725 * Allocate a (normally) writable anonymous page of 2726 * zeroes. If no advance reservations, reserve now. 
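* For MAP_NORESERVE segments swap is reserved one page at a time at first
* touch via anon_resv_zone(); if that reservation fails the fault returns
* ENOMEM instead of allocating the page.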
2727 */ 2728 if (svd->flags & MAP_NORESERVE) { 2729 if (anon_resv_zone(ptob(1), 2730 seg->s_as->a_proc->p_zone)) { 2731 atomic_add_long(&svd->swresv, ptob(1)); 2732 atomic_add_long(&seg->s_as->a_resvsize, 2733 ptob(1)); 2734 } else { 2735 err = ENOMEM; 2736 goto out; 2737 } 2738 } 2739 if ((pp = anon_zero(seg, addr, &ap, 2740 svd->cred)) == NULL) { 2741 err = ENOMEM; 2742 goto out; /* out of swap space */ 2743 } 2744 /* 2745 * Re-acquire the anon_map lock and 2746 * initialize the anon array entry. 2747 */ 2748 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2749 ANON_SLEEP); 2750 2751 ASSERT(pp->p_szc == 0); 2752 2753 /* 2754 * Handle pages that have been marked for migration 2755 */ 2756 if (lgrp_optimizations()) 2757 page_migrate(seg, addr, &pp, 1); 2758 2759 if (enable_mbit_wa) { 2760 if (rw == S_WRITE) 2761 hat_setmod(pp); 2762 else if (!hat_ismod(pp)) 2763 prot &= ~PROT_WRITE; 2764 } 2765 /* 2766 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2767 * with MC_LOCKAS, MCL_FUTURE) and this is a 2768 * MAP_NORESERVE segment, we may need to 2769 * permanently lock the page as it is being faulted 2770 * for the first time. The following text applies 2771 * only to MAP_NORESERVE segments: 2772 * 2773 * As per memcntl(2), if this segment was created 2774 * after MCL_FUTURE was applied (a "future" 2775 * segment), its pages must be locked. If this 2776 * segment existed at MCL_FUTURE application (a 2777 * "past" segment), the interface is unclear. 2778 * 2779 * We decide to lock only if vpage is present: 2780 * 2781 * - "future" segments will have a vpage array (see 2782 * as_map), and so will be locked as required 2783 * 2784 * - "past" segments may not have a vpage array, 2785 * depending on whether events (such as 2786 * mprotect) have occurred. Locking if vpage 2787 * exists will preserve legacy behavior. Not 2788 * locking if vpage is absent, will not break 2789 * the interface or legacy behavior. Note that 2790 * allocating vpage here if it's absent requires 2791 * upgrading the segvn reader lock, the cost of 2792 * which does not seem worthwhile. 2793 * 2794 * Usually testing and setting VPP_ISPPLOCK and 2795 * VPP_SETPPLOCK requires holding the segvn lock as 2796 * writer, but in this case all readers are 2797 * serializing on the anon array lock. 2798 */ 2799 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2800 (svd->flags & MAP_NORESERVE) && 2801 !VPP_ISPPLOCK(vpage)) { 2802 proc_t *p = seg->s_as->a_proc; 2803 ASSERT(svd->type == MAP_PRIVATE); 2804 mutex_enter(&p->p_lock); 2805 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2806 1) == 0) { 2807 claim = VPP_PROT(vpage) & PROT_WRITE; 2808 if (page_pp_lock(pp, claim, 0)) { 2809 VPP_SETPPLOCK(vpage); 2810 } else { 2811 rctl_decr_locked_mem(p, NULL, 2812 PAGESIZE, 1); 2813 } 2814 } 2815 mutex_exit(&p->p_lock); 2816 } 2817 2818 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2819 hat_memload(hat, addr, pp, prot, hat_flag); 2820 2821 if (!(hat_flag & HAT_LOAD_LOCK)) 2822 page_unlock(pp); 2823 2824 anon_array_exit(&cookie); 2825 return (0); 2826 } 2827 } 2828 2829 /* 2830 * Obtain the page structure via anon_getpage() if it is 2831 * a private copy of an object (the result of a previous 2832 * copy-on-write). 
2833 */ 2834 if (amp != NULL) { 2835 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2836 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2837 seg, addr, rw, svd->cred); 2838 if (err) 2839 goto out; 2840 2841 if (svd->type == MAP_SHARED) { 2842 /* 2843 * If this is a shared mapping to an 2844 * anon_map, then ignore the write 2845 * permissions returned by anon_getpage(). 2846 * They apply to the private mappings 2847 * of this anon_map. 2848 */ 2849 vpprot |= PROT_WRITE; 2850 } 2851 opp = anon_pl[0]; 2852 } 2853 } 2854 2855 /* 2856 * Search the pl[] list passed in if it is from the 2857 * original object (i.e., not a private copy). 2858 */ 2859 if (opp == NULL) { 2860 /* 2861 * Find original page. We must be bringing it in 2862 * from the list in pl[]. 2863 */ 2864 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2865 if (opp == PAGE_HANDLED) 2866 continue; 2867 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2868 if (opp->p_offset == off) 2869 break; 2870 } 2871 if (opp == NULL) { 2872 panic("segvn_faultpage not found"); 2873 /*NOTREACHED*/ 2874 } 2875 *ppp = PAGE_HANDLED; 2876 2877 } 2878 2879 ASSERT(PAGE_LOCKED(opp)); 2880 2881 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2882 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2883 2884 /* 2885 * The fault is treated as a copy-on-write fault if a 2886 * write occurs on a private segment and the object 2887 * page (i.e., mapping) is write protected. We assume 2888 * that fatal protection checks have already been made. 2889 */ 2890 2891 if (brkcow) { 2892 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2893 cow = !(vpprot & PROT_WRITE); 2894 } else if (svd->tr_state == SEGVN_TR_ON) { 2895 /* 2896 * If we are doing text replication COW on first touch. 2897 */ 2898 ASSERT(amp != NULL); 2899 ASSERT(svd->vp != NULL); 2900 ASSERT(rw != S_WRITE); 2901 cow = (ap == NULL); 2902 } else { 2903 cow = 0; 2904 } 2905 2906 /* 2907 * If not a copy-on-write case load the translation 2908 * and return. 2909 */ 2910 if (cow == 0) { 2911 2912 /* 2913 * Handle pages that have been marked for migration 2914 */ 2915 if (lgrp_optimizations()) 2916 page_migrate(seg, addr, &opp, 1); 2917 2918 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2919 if (rw == S_WRITE) 2920 hat_setmod(opp); 2921 else if (rw != S_OTHER && !hat_ismod(opp)) 2922 prot &= ~PROT_WRITE; 2923 } 2924 2925 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2926 (!svd->pageprot && svd->prot == (prot & vpprot))); 2927 ASSERT(amp == NULL || 2928 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2929 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2930 svd->rcookie); 2931 2932 if (!(hat_flag & HAT_LOAD_LOCK)) 2933 page_unlock(opp); 2934 2935 if (anon_lock) { 2936 anon_array_exit(&cookie); 2937 } 2938 return (0); 2939 } 2940 2941 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2942 2943 hat_setref(opp); 2944 2945 ASSERT(amp != NULL && anon_lock); 2946 2947 /* 2948 * Steal the page only if it isn't a private page 2949 * since stealing a private page is not worth the effort. 2950 */ 2951 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2952 steal = 1; 2953 2954 /* 2955 * Steal the original page if the following conditions are true: 2956 * 2957 * We are low on memory, the page is not private, page is not large, 2958 * not shared, not modified, not `locked' or if we have it `locked' 2959 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2960 * that the page is not shared) and if it doesn't have any 2961 * translations. 
page_struct_lock isn't needed to look at p_cowcnt 2962 * and p_lckcnt because we first get exclusive lock on page. 2963 */ 2964 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2965 2966 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2967 page_tryupgrade(opp) && !hat_ismod(opp) && 2968 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2969 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2970 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2971 /* 2972 * Check if this page has other translations 2973 * after unloading our translation. 2974 */ 2975 if (hat_page_is_mapped(opp)) { 2976 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2977 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 2978 HAT_UNLOAD); 2979 } 2980 2981 /* 2982 * hat_unload() might sync back someone else's recent 2983 * modification, so check again. 2984 */ 2985 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 2986 pageflags |= STEAL_PAGE; 2987 } 2988 2989 /* 2990 * If we have a vpage pointer, see if it indicates that we have 2991 * ``locked'' the page we map -- if so, tell anon_private to 2992 * transfer the locking resource to the new page. 2993 * 2994 * See Statement at the beginning of segvn_lockop regarding 2995 * the way lockcnts/cowcnts are handled during COW. 2996 * 2997 */ 2998 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 2999 pageflags |= LOCK_PAGE; 3000 3001 /* 3002 * Allocate a private page and perform the copy. 3003 * For MAP_NORESERVE reserve swap space now, unless this 3004 * is a cow fault on an existing anon page in which case 3005 * MAP_NORESERVE will have made advance reservations. 3006 */ 3007 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3008 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3009 atomic_add_long(&svd->swresv, ptob(1)); 3010 atomic_add_long(&seg->s_as->a_resvsize, ptob(1)); 3011 } else { 3012 page_unlock(opp); 3013 err = ENOMEM; 3014 goto out; 3015 } 3016 } 3017 oldap = ap; 3018 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3019 if (pp == NULL) { 3020 err = ENOMEM; /* out of swap space */ 3021 goto out; 3022 } 3023 3024 /* 3025 * If we copied away from an anonymous page, then 3026 * we are one step closer to freeing up an anon slot. 3027 * 3028 * NOTE: The original anon slot must be released while 3029 * holding the "anon_map" lock. This is necessary to prevent 3030 * other threads from obtaining a pointer to the anon slot 3031 * which may be freed if its "refcnt" is 1. 3032 */ 3033 if (oldap != NULL) 3034 anon_decref(oldap); 3035 3036 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3037 3038 /* 3039 * Handle pages that have been marked for migration 3040 */ 3041 if (lgrp_optimizations()) 3042 page_migrate(seg, addr, &pp, 1); 3043 3044 ASSERT(pp->p_szc == 0); 3045 3046 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3047 if (enable_mbit_wa) { 3048 if (rw == S_WRITE) 3049 hat_setmod(pp); 3050 else if (!hat_ismod(pp)) 3051 prot &= ~PROT_WRITE; 3052 } 3053 3054 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3055 hat_memload(hat, addr, pp, prot, hat_flag); 3056 3057 if (!(hat_flag & HAT_LOAD_LOCK)) 3058 page_unlock(pp); 3059 3060 ASSERT(anon_lock); 3061 anon_array_exit(&cookie); 3062 return (0); 3063 out: 3064 if (anon_lock) 3065 anon_array_exit(&cookie); 3066 3067 if (type == F_SOFTLOCK) { 3068 atomic_dec_ulong((ulong_t *)&svd->softlockcnt); 3069 } 3070 return (FC_MAKE_ERR(err)); 3071 } 3072 3073 /* 3074 * relocate a bunch of smaller targ pages into one large repl page. 
all targ 3075 * pages must be complete pages smaller than replacement pages. 3076 * it's assumed that no page's szc can change since they are all PAGESIZE or 3077 * complete large pages locked SHARED. 3078 */ 3079 static void 3080 segvn_relocate_pages(page_t **targ, page_t *replacement) 3081 { 3082 page_t *pp; 3083 pgcnt_t repl_npgs, curnpgs; 3084 pgcnt_t i; 3085 uint_t repl_szc = replacement->p_szc; 3086 page_t *first_repl = replacement; 3087 page_t *repl; 3088 spgcnt_t npgs; 3089 3090 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3091 3092 ASSERT(repl_szc != 0); 3093 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3094 3095 i = 0; 3096 while (repl_npgs) { 3097 spgcnt_t nreloc; 3098 int err; 3099 ASSERT(replacement != NULL); 3100 pp = targ[i]; 3101 ASSERT(pp->p_szc < repl_szc); 3102 ASSERT(PAGE_EXCL(pp)); 3103 ASSERT(!PP_ISFREE(pp)); 3104 curnpgs = page_get_pagecnt(pp->p_szc); 3105 if (curnpgs == 1) { 3106 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3107 repl = replacement; 3108 page_sub(&replacement, repl); 3109 ASSERT(PAGE_EXCL(repl)); 3110 ASSERT(!PP_ISFREE(repl)); 3111 ASSERT(repl->p_szc == repl_szc); 3112 } else { 3113 page_t *repl_savepp; 3114 int j; 3115 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3116 repl_savepp = replacement; 3117 for (j = 0; j < curnpgs; j++) { 3118 repl = replacement; 3119 page_sub(&replacement, repl); 3120 ASSERT(PAGE_EXCL(repl)); 3121 ASSERT(!PP_ISFREE(repl)); 3122 ASSERT(repl->p_szc == repl_szc); 3123 ASSERT(page_pptonum(targ[i + j]) == 3124 page_pptonum(targ[i]) + j); 3125 } 3126 repl = repl_savepp; 3127 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3128 } 3129 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3130 if (err || nreloc != curnpgs) { 3131 panic("segvn_relocate_pages: " 3132 "page_relocate failed err=%d curnpgs=%ld " 3133 "nreloc=%ld", err, curnpgs, nreloc); 3134 } 3135 ASSERT(curnpgs <= repl_npgs); 3136 repl_npgs -= curnpgs; 3137 i += curnpgs; 3138 } 3139 ASSERT(replacement == NULL); 3140 3141 repl = first_repl; 3142 repl_npgs = npgs; 3143 for (i = 0; i < repl_npgs; i++) { 3144 ASSERT(PAGE_EXCL(repl)); 3145 ASSERT(!PP_ISFREE(repl)); 3146 targ[i] = repl; 3147 page_downgrade(targ[i]); 3148 repl++; 3149 } 3150 } 3151 3152 /* 3153 * Check if all pages in ppa array are complete smaller than szc pages and 3154 * their roots will still be aligned relative to their current size if the 3155 * entire ppa array is relocated into one szc page. If these conditions are 3156 * not met return 0. 3157 * 3158 * If all pages are properly aligned attempt to upgrade their locks 3159 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3160 * upgrdfail was set to 0 by caller. 3161 * 3162 * Return 1 if all pages are aligned and locked exclusively. 3163 * 3164 * If all pages in ppa array happen to be physically contiguous to make one 3165 * szc page and all exclusive locks are successfully obtained promote the page 3166 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
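*
* Illustrative caller pattern (a sketch, not a verbatim call site; "ok"
* is simply a name for the return value):
*
*	int upgrdfail = 0;
*	uint_t pszc = 0;
*	int ok = segvn_full_szcpages(ppa, szc, &upgrdfail, &pszc);
*
* If ok is 1, every page is locked (exclusively, or shared when the page
* size was promoted to szc). If ok is 0 with upgrdfail set, a lock
* upgrade failed and pszc holds the size of the page that could not be
* upgraded; if ok is 0 with upgrdfail clear, the pages are not aligned or
* sized so that they could form a single szc page.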
3167 */ 3168 static int 3169 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3170 { 3171 page_t *pp; 3172 pfn_t pfn; 3173 pgcnt_t totnpgs = page_get_pagecnt(szc); 3174 pfn_t first_pfn; 3175 int contig = 1; 3176 pgcnt_t i; 3177 pgcnt_t j; 3178 uint_t curszc; 3179 pgcnt_t curnpgs; 3180 int root = 0; 3181 3182 ASSERT(szc > 0); 3183 3184 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3185 3186 for (i = 0; i < totnpgs; i++) { 3187 pp = ppa[i]; 3188 ASSERT(PAGE_SHARED(pp)); 3189 ASSERT(!PP_ISFREE(pp)); 3190 pfn = page_pptonum(pp); 3191 if (i == 0) { 3192 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3193 contig = 0; 3194 } else { 3195 first_pfn = pfn; 3196 } 3197 } else if (contig && pfn != first_pfn + i) { 3198 contig = 0; 3199 } 3200 if (pp->p_szc == 0) { 3201 if (root) { 3202 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3203 return (0); 3204 } 3205 } else if (!root) { 3206 if ((curszc = pp->p_szc) >= szc) { 3207 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3208 return (0); 3209 } 3210 if (curszc == 0) { 3211 /* 3212 * p_szc changed means we don't have all pages 3213 * locked. return failure. 3214 */ 3215 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3216 return (0); 3217 } 3218 curnpgs = page_get_pagecnt(curszc); 3219 if (!IS_P2ALIGNED(pfn, curnpgs) || 3220 !IS_P2ALIGNED(i, curnpgs)) { 3221 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3222 return (0); 3223 } 3224 root = 1; 3225 } else { 3226 ASSERT(i > 0); 3227 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3228 if (pp->p_szc != curszc) { 3229 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3230 return (0); 3231 } 3232 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3233 panic("segvn_full_szcpages: " 3234 "large page not physically contiguous"); 3235 } 3236 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3237 root = 0; 3238 } 3239 } 3240 } 3241 3242 for (i = 0; i < totnpgs; i++) { 3243 ASSERT(ppa[i]->p_szc < szc); 3244 if (!page_tryupgrade(ppa[i])) { 3245 for (j = 0; j < i; j++) { 3246 page_downgrade(ppa[j]); 3247 } 3248 *pszc = ppa[i]->p_szc; 3249 *upgrdfail = 1; 3250 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3251 return (0); 3252 } 3253 } 3254 3255 /* 3256 * When a page is put a free cachelist its szc is set to 0. if file 3257 * system reclaimed pages from cachelist targ pages will be physically 3258 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3259 * pages without any relocations. 3260 * To avoid any hat issues with previous small mappings 3261 * hat_pageunload() the target pages first. 3262 */ 3263 if (contig) { 3264 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3265 for (i = 0; i < totnpgs; i++) { 3266 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3267 } 3268 for (i = 0; i < totnpgs; i++) { 3269 ppa[i]->p_szc = szc; 3270 } 3271 for (i = 0; i < totnpgs; i++) { 3272 ASSERT(PAGE_EXCL(ppa[i])); 3273 page_downgrade(ppa[i]); 3274 } 3275 if (pszc != NULL) { 3276 *pszc = szc; 3277 } 3278 } 3279 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3280 return (1); 3281 } 3282 3283 /* 3284 * Create physically contiguous pages for [vp, off] - [vp, off + 3285 * page_size(szc)) range and for private segment return them in ppa array. 3286 * Pages are created either via IO or relocations. 3287 * 3288 * Return 1 on success and 0 on failure. 3289 * 3290 * If physically contiguous pages already exist for this range return 1 without 3291 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3292 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
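*
* The large-page vnode fault path uses it roughly as follows (sketch
* only; see segvn_fault_vnodepages() for the real call site):
*
*	ppa[0] = NULL;
*	physcontig = segvn_fill_vp_pages(svd, vp, off, szc, ppa,
*	    &pplist, &pszc, &downsize);
*
* If ppa[0] is still NULL on return the caller obtains the pages itself
* via VOP_GETPAGE(); if downsize was requested (and this is not a
* SOFTLOCK fault) it retries the fault at a smaller page size.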
3293 */ 3294 3295 static int 3296 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3297 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3298 int *downsize) 3299 3300 { 3301 page_t *pplist = *ppplist; 3302 size_t pgsz = page_get_pagesize(szc); 3303 pgcnt_t pages = btop(pgsz); 3304 ulong_t start_off = off; 3305 u_offset_t eoff = off + pgsz; 3306 spgcnt_t nreloc; 3307 u_offset_t io_off = off; 3308 size_t io_len; 3309 page_t *io_pplist = NULL; 3310 page_t *done_pplist = NULL; 3311 pgcnt_t pgidx = 0; 3312 page_t *pp; 3313 page_t *newpp; 3314 page_t *targpp; 3315 int io_err = 0; 3316 int i; 3317 pfn_t pfn; 3318 ulong_t ppages; 3319 page_t *targ_pplist = NULL; 3320 page_t *repl_pplist = NULL; 3321 page_t *tmp_pplist; 3322 int nios = 0; 3323 uint_t pszc; 3324 struct vattr va; 3325 3326 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3327 3328 ASSERT(szc != 0); 3329 ASSERT(pplist->p_szc == szc); 3330 3331 /* 3332 * downsize will be set to 1 only if we fail to lock pages. this will 3333 * allow subsequent faults to try to relocate the page again. If we 3334 * fail due to misalignment don't downsize and let the caller map the 3335 * whole region with small mappings to avoid more faults into the area 3336 * where we can't get large pages anyway. 3337 */ 3338 *downsize = 0; 3339 3340 while (off < eoff) { 3341 newpp = pplist; 3342 ASSERT(newpp != NULL); 3343 ASSERT(PAGE_EXCL(newpp)); 3344 ASSERT(!PP_ISFREE(newpp)); 3345 /* 3346 * we pass NULL for nrelocp to page_lookup_create() 3347 * so that it doesn't relocate. We relocate here 3348 * later only after we make sure we can lock all 3349 * pages in the range we handle and they are all 3350 * aligned. 3351 */ 3352 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3353 ASSERT(pp != NULL); 3354 ASSERT(!PP_ISFREE(pp)); 3355 ASSERT(pp->p_vnode == vp); 3356 ASSERT(pp->p_offset == off); 3357 if (pp == newpp) { 3358 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3359 page_sub(&pplist, pp); 3360 ASSERT(PAGE_EXCL(pp)); 3361 ASSERT(page_iolock_assert(pp)); 3362 page_list_concat(&io_pplist, &pp); 3363 off += PAGESIZE; 3364 continue; 3365 } 3366 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3367 pfn = page_pptonum(pp); 3368 pszc = pp->p_szc; 3369 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3370 IS_P2ALIGNED(pfn, pages)) { 3371 ASSERT(repl_pplist == NULL); 3372 ASSERT(done_pplist == NULL); 3373 ASSERT(pplist == *ppplist); 3374 page_unlock(pp); 3375 page_free_replacement_page(pplist); 3376 page_create_putback(pages); 3377 *ppplist = NULL; 3378 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3379 return (1); 3380 } 3381 if (pszc >= szc) { 3382 page_unlock(pp); 3383 segvn_faultvnmpss_align_err1++; 3384 goto out; 3385 } 3386 ppages = page_get_pagecnt(pszc); 3387 if (!IS_P2ALIGNED(pfn, ppages)) { 3388 ASSERT(pszc > 0); 3389 /* 3390 * sizing down to pszc won't help. 3391 */ 3392 page_unlock(pp); 3393 segvn_faultvnmpss_align_err2++; 3394 goto out; 3395 } 3396 pfn = page_pptonum(newpp); 3397 if (!IS_P2ALIGNED(pfn, ppages)) { 3398 ASSERT(pszc > 0); 3399 /* 3400 * sizing down to pszc won't help. 
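* (The replacement page is itself misaligned for pszc, so retrying at a
* smaller large-page size cannot help; as noted at the top of this
* function, *downsize is not set for misalignment and the caller maps
* the range with small pages instead.)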
3401 */ 3402 page_unlock(pp); 3403 segvn_faultvnmpss_align_err3++; 3404 goto out; 3405 } 3406 if (!PAGE_EXCL(pp)) { 3407 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3408 page_unlock(pp); 3409 *downsize = 1; 3410 *ret_pszc = pp->p_szc; 3411 goto out; 3412 } 3413 targpp = pp; 3414 if (io_pplist != NULL) { 3415 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3416 io_len = off - io_off; 3417 /* 3418 * Some file systems like NFS don't check EOF 3419 * conditions in VOP_PAGEIO(). Check it here 3420 * now that pages are locked SE_EXCL. Any file 3421 * truncation will wait until the pages are 3422 * unlocked so no need to worry that file will 3423 * be truncated after we check its size here. 3424 * XXX fix NFS to remove this check. 3425 */ 3426 va.va_mask = AT_SIZE; 3427 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3428 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3429 page_unlock(targpp); 3430 goto out; 3431 } 3432 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3433 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3434 *downsize = 1; 3435 *ret_pszc = 0; 3436 page_unlock(targpp); 3437 goto out; 3438 } 3439 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3440 B_READ, svd->cred, NULL); 3441 if (io_err) { 3442 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3443 page_unlock(targpp); 3444 if (io_err == EDEADLK) { 3445 segvn_vmpss_pageio_deadlk_err++; 3446 } 3447 goto out; 3448 } 3449 nios++; 3450 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3451 while (io_pplist != NULL) { 3452 pp = io_pplist; 3453 page_sub(&io_pplist, pp); 3454 ASSERT(page_iolock_assert(pp)); 3455 page_io_unlock(pp); 3456 pgidx = (pp->p_offset - start_off) >> 3457 PAGESHIFT; 3458 ASSERT(pgidx < pages); 3459 ppa[pgidx] = pp; 3460 page_list_concat(&done_pplist, &pp); 3461 } 3462 } 3463 pp = targpp; 3464 ASSERT(PAGE_EXCL(pp)); 3465 ASSERT(pp->p_szc <= pszc); 3466 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3467 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3468 page_unlock(pp); 3469 *downsize = 1; 3470 *ret_pszc = pp->p_szc; 3471 goto out; 3472 } 3473 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3474 /* 3475 * page szc chould have changed before the entire group was 3476 * locked. reread page szc. 
3477 */ 3478 pszc = pp->p_szc; 3479 ppages = page_get_pagecnt(pszc); 3480 3481 /* link just the roots */ 3482 page_list_concat(&targ_pplist, &pp); 3483 page_sub(&pplist, newpp); 3484 page_list_concat(&repl_pplist, &newpp); 3485 off += PAGESIZE; 3486 while (--ppages != 0) { 3487 newpp = pplist; 3488 page_sub(&pplist, newpp); 3489 off += PAGESIZE; 3490 } 3491 io_off = off; 3492 } 3493 if (io_pplist != NULL) { 3494 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3495 io_len = eoff - io_off; 3496 va.va_mask = AT_SIZE; 3497 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3498 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3499 goto out; 3500 } 3501 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3502 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3503 *downsize = 1; 3504 *ret_pszc = 0; 3505 goto out; 3506 } 3507 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3508 B_READ, svd->cred, NULL); 3509 if (io_err) { 3510 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3511 if (io_err == EDEADLK) { 3512 segvn_vmpss_pageio_deadlk_err++; 3513 } 3514 goto out; 3515 } 3516 nios++; 3517 while (io_pplist != NULL) { 3518 pp = io_pplist; 3519 page_sub(&io_pplist, pp); 3520 ASSERT(page_iolock_assert(pp)); 3521 page_io_unlock(pp); 3522 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3523 ASSERT(pgidx < pages); 3524 ppa[pgidx] = pp; 3525 } 3526 } 3527 /* 3528 * we're now bound to succeed or panic. 3529 * remove pages from done_pplist. it's not needed anymore. 3530 */ 3531 while (done_pplist != NULL) { 3532 pp = done_pplist; 3533 page_sub(&done_pplist, pp); 3534 } 3535 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3536 ASSERT(pplist == NULL); 3537 *ppplist = NULL; 3538 while (targ_pplist != NULL) { 3539 int ret; 3540 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3541 ASSERT(repl_pplist); 3542 pp = targ_pplist; 3543 page_sub(&targ_pplist, pp); 3544 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3545 newpp = repl_pplist; 3546 page_sub(&repl_pplist, newpp); 3547 #ifdef DEBUG 3548 pfn = page_pptonum(pp); 3549 pszc = pp->p_szc; 3550 ppages = page_get_pagecnt(pszc); 3551 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3552 pfn = page_pptonum(newpp); 3553 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3554 ASSERT(P2PHASE(pfn, pages) == pgidx); 3555 #endif 3556 nreloc = 0; 3557 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3558 if (ret != 0 || nreloc == 0) { 3559 panic("segvn_fill_vp_pages: " 3560 "page_relocate failed"); 3561 } 3562 pp = newpp; 3563 while (nreloc-- != 0) { 3564 ASSERT(PAGE_EXCL(pp)); 3565 ASSERT(pp->p_vnode == vp); 3566 ASSERT(pgidx == 3567 ((pp->p_offset - start_off) >> PAGESHIFT)); 3568 ppa[pgidx++] = pp; 3569 pp++; 3570 } 3571 } 3572 3573 if (svd->type == MAP_PRIVATE) { 3574 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3575 for (i = 0; i < pages; i++) { 3576 ASSERT(ppa[i] != NULL); 3577 ASSERT(PAGE_EXCL(ppa[i])); 3578 ASSERT(ppa[i]->p_vnode == vp); 3579 ASSERT(ppa[i]->p_offset == 3580 start_off + (i << PAGESHIFT)); 3581 page_downgrade(ppa[i]); 3582 } 3583 ppa[pages] = NULL; 3584 } else { 3585 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3586 /* 3587 * the caller will still call VOP_GETPAGE() for shared segments 3588 * to check FS write permissions. For private segments we map 3589 * file read only anyway. so no VOP_GETPAGE is needed. 
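* This is also why ppa[0] is reset to NULL for MAP_SHARED below: per the
* contract described above this function, a NULL ppa[0] tells the caller
* that the array was not filled and that it must obtain the pages itself.
* Private segments instead get a fully filled, downgraded ppa terminated
* by ppa[pages] = NULL.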
3590 */ 3591 for (i = 0; i < pages; i++) { 3592 ASSERT(ppa[i] != NULL); 3593 ASSERT(PAGE_EXCL(ppa[i])); 3594 ASSERT(ppa[i]->p_vnode == vp); 3595 ASSERT(ppa[i]->p_offset == 3596 start_off + (i << PAGESHIFT)); 3597 page_unlock(ppa[i]); 3598 } 3599 ppa[0] = NULL; 3600 } 3601 3602 return (1); 3603 out: 3604 /* 3605 * Do the cleanup. Unlock target pages we didn't relocate. They are 3606 * linked on targ_pplist by root pages. reassemble unused replacement 3607 * and io pages back to pplist. 3608 */ 3609 if (io_pplist != NULL) { 3610 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3611 pp = io_pplist; 3612 do { 3613 ASSERT(pp->p_vnode == vp); 3614 ASSERT(pp->p_offset == io_off); 3615 ASSERT(page_iolock_assert(pp)); 3616 page_io_unlock(pp); 3617 page_hashout(pp, NULL); 3618 io_off += PAGESIZE; 3619 } while ((pp = pp->p_next) != io_pplist); 3620 page_list_concat(&io_pplist, &pplist); 3621 pplist = io_pplist; 3622 } 3623 tmp_pplist = NULL; 3624 while (targ_pplist != NULL) { 3625 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3626 pp = targ_pplist; 3627 ASSERT(PAGE_EXCL(pp)); 3628 page_sub(&targ_pplist, pp); 3629 3630 pszc = pp->p_szc; 3631 ppages = page_get_pagecnt(pszc); 3632 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3633 3634 if (pszc != 0) { 3635 group_page_unlock(pp); 3636 } 3637 page_unlock(pp); 3638 3639 pp = repl_pplist; 3640 ASSERT(pp != NULL); 3641 ASSERT(PAGE_EXCL(pp)); 3642 ASSERT(pp->p_szc == szc); 3643 page_sub(&repl_pplist, pp); 3644 3645 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3646 3647 /* relink replacement page */ 3648 page_list_concat(&tmp_pplist, &pp); 3649 while (--ppages != 0) { 3650 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3651 pp++; 3652 ASSERT(PAGE_EXCL(pp)); 3653 ASSERT(pp->p_szc == szc); 3654 page_list_concat(&tmp_pplist, &pp); 3655 } 3656 } 3657 if (tmp_pplist != NULL) { 3658 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3659 page_list_concat(&tmp_pplist, &pplist); 3660 pplist = tmp_pplist; 3661 } 3662 /* 3663 * at this point all pages are either on done_pplist or 3664 * pplist. They can't be all on done_pplist otherwise 3665 * we'd've been done. 3666 */ 3667 ASSERT(pplist != NULL); 3668 if (nios != 0) { 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3670 pp = pplist; 3671 do { 3672 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3673 ASSERT(pp->p_szc == szc); 3674 ASSERT(PAGE_EXCL(pp)); 3675 ASSERT(pp->p_vnode != vp); 3676 pp->p_szc = 0; 3677 } while ((pp = pp->p_next) != pplist); 3678 3679 pp = done_pplist; 3680 do { 3681 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3682 ASSERT(pp->p_szc == szc); 3683 ASSERT(PAGE_EXCL(pp)); 3684 ASSERT(pp->p_vnode == vp); 3685 pp->p_szc = 0; 3686 } while ((pp = pp->p_next) != done_pplist); 3687 3688 while (pplist != NULL) { 3689 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3690 pp = pplist; 3691 page_sub(&pplist, pp); 3692 page_free(pp, 0); 3693 } 3694 3695 while (done_pplist != NULL) { 3696 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3697 pp = done_pplist; 3698 page_sub(&done_pplist, pp); 3699 page_unlock(pp); 3700 } 3701 *ppplist = NULL; 3702 return (0); 3703 } 3704 ASSERT(pplist == *ppplist); 3705 if (io_err) { 3706 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3707 /* 3708 * don't downsize on io error. 3709 * see if vop_getpage succeeds. 3710 * pplist may still be used in this case 3711 * for relocations. 
3712 */ 3713 return (0); 3714 } 3715 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3716 page_free_replacement_page(pplist); 3717 page_create_putback(pages); 3718 *ppplist = NULL; 3719 return (0); 3720 } 3721 3722 int segvn_anypgsz = 0; 3723 3724 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3725 if ((type) == F_SOFTLOCK) { \ 3726 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3727 -(pages)); \ 3728 } 3729 3730 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3731 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3732 if ((rw) == S_WRITE) { \ 3733 for (i = 0; i < (pages); i++) { \ 3734 ASSERT((ppa)[i]->p_vnode == \ 3735 (ppa)[0]->p_vnode); \ 3736 hat_setmod((ppa)[i]); \ 3737 } \ 3738 } else if ((rw) != S_OTHER && \ 3739 ((prot) & (vpprot) & PROT_WRITE)) { \ 3740 for (i = 0; i < (pages); i++) { \ 3741 ASSERT((ppa)[i]->p_vnode == \ 3742 (ppa)[0]->p_vnode); \ 3743 if (!hat_ismod((ppa)[i])) { \ 3744 prot &= ~PROT_WRITE; \ 3745 break; \ 3746 } \ 3747 } \ 3748 } \ 3749 } 3750 3751 #ifdef VM_STATS 3752 3753 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3754 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3755 3756 #else /* VM_STATS */ 3757 3758 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3759 3760 #endif 3761 3762 static faultcode_t 3763 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3764 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3765 caddr_t eaddr, int brkcow) 3766 { 3767 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3768 struct anon_map *amp = svd->amp; 3769 uchar_t segtype = svd->type; 3770 uint_t szc = seg->s_szc; 3771 size_t pgsz = page_get_pagesize(szc); 3772 size_t maxpgsz = pgsz; 3773 pgcnt_t pages = btop(pgsz); 3774 pgcnt_t maxpages = pages; 3775 size_t ppasize = (pages + 1) * sizeof (page_t *); 3776 caddr_t a = lpgaddr; 3777 caddr_t maxlpgeaddr = lpgeaddr; 3778 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3779 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3780 struct vpage *vpage = (svd->vpage != NULL) ? 3781 &svd->vpage[seg_page(seg, a)] : NULL; 3782 vnode_t *vp = svd->vp; 3783 page_t **ppa; 3784 uint_t pszc; 3785 size_t ppgsz; 3786 pgcnt_t ppages; 3787 faultcode_t err = 0; 3788 int ierr; 3789 int vop_size_err = 0; 3790 uint_t protchk, prot, vpprot; 3791 ulong_t i; 3792 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3793 anon_sync_obj_t an_cookie; 3794 enum seg_rw arw; 3795 int alloc_failed = 0; 3796 int adjszc_chk; 3797 struct vattr va; 3798 int xhat = 0; 3799 page_t *pplist; 3800 pfn_t pfn; 3801 int physcontig; 3802 int upgrdfail; 3803 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3804 int tron = (svd->tr_state == SEGVN_TR_ON); 3805 3806 ASSERT(szc != 0); 3807 ASSERT(vp != NULL); 3808 ASSERT(brkcow == 0 || amp != NULL); 3809 ASSERT(tron == 0 || amp != NULL); 3810 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3811 ASSERT(!(svd->flags & MAP_NORESERVE)); 3812 ASSERT(type != F_SOFTUNLOCK); 3813 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3814 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3815 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3816 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3817 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3818 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3819 3820 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3821 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3822 3823 if (svd->flags & MAP_TEXT) { 3824 hat_flag |= HAT_LOAD_TEXT; 3825 } 3826 3827 if (svd->pageprot) { 3828 switch (rw) { 3829 case S_READ: 3830 protchk = PROT_READ; 3831 break; 3832 case S_WRITE: 3833 protchk = PROT_WRITE; 3834 break; 3835 case S_EXEC: 3836 protchk = PROT_EXEC; 3837 break; 3838 case S_OTHER: 3839 default: 3840 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3841 break; 3842 } 3843 } else { 3844 prot = svd->prot; 3845 /* caller has already done segment level protection check. */ 3846 } 3847 3848 if (seg->s_as->a_hat != hat) { 3849 xhat = 1; 3850 } 3851 3852 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3853 SEGVN_VMSTAT_FLTVNPAGES(2); 3854 arw = S_READ; 3855 } else { 3856 arw = rw; 3857 } 3858 3859 ppa = kmem_alloc(ppasize, KM_SLEEP); 3860 3861 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3862 3863 for (;;) { 3864 adjszc_chk = 0; 3865 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3866 if (adjszc_chk) { 3867 while (szc < seg->s_szc) { 3868 uintptr_t e; 3869 uint_t tszc; 3870 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3871 seg->s_szc; 3872 ppgsz = page_get_pagesize(tszc); 3873 if (!IS_P2ALIGNED(a, ppgsz) || 3874 ((alloc_failed >> tszc) & 0x1)) { 3875 break; 3876 } 3877 SEGVN_VMSTAT_FLTVNPAGES(4); 3878 szc = tszc; 3879 pgsz = ppgsz; 3880 pages = btop(pgsz); 3881 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3882 lpgeaddr = (caddr_t)e; 3883 } 3884 } 3885 3886 again: 3887 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3888 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3889 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 3890 anon_array_enter(amp, aindx, &an_cookie); 3891 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3892 SEGVN_VMSTAT_FLTVNPAGES(5); 3893 ASSERT(anon_pages(amp->ahp, aindx, 3894 maxpages) == maxpages); 3895 anon_array_exit(&an_cookie); 3896 ANON_LOCK_EXIT(&amp->a_rwlock); 3897 err = segvn_fault_anonpages(hat, seg, 3898 a, a + maxpgsz, type, rw, 3899 MAX(a, addr), 3900 MIN(a + maxpgsz, eaddr), brkcow); 3901 if (err != 0) { 3902 SEGVN_VMSTAT_FLTVNPAGES(6); 3903 goto out; 3904 } 3905 if (szc < seg->s_szc) { 3906 szc = seg->s_szc; 3907 pgsz = maxpgsz; 3908 pages = maxpages; 3909 lpgeaddr = maxlpgeaddr; 3910 } 3911 goto next; 3912 } else { 3913 ASSERT(anon_pages(amp->ahp, aindx, 3914 maxpages) == 0); 3915 SEGVN_VMSTAT_FLTVNPAGES(7); 3916 anon_array_exit(&an_cookie); 3917 ANON_LOCK_EXIT(&amp->a_rwlock); 3918 } 3919 } 3920 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3921 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3922 3923 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3924 ASSERT(vpage != NULL); 3925 prot = VPP_PROT(vpage); 3926 ASSERT(sameprot(seg, a, maxpgsz)); 3927 if ((prot & protchk) == 0) { 3928 SEGVN_VMSTAT_FLTVNPAGES(8); 3929 err = FC_PROT; 3930 goto out; 3931 } 3932 } 3933 if (type == F_SOFTLOCK) { 3934 atomic_add_long((ulong_t *)&svd->softlockcnt, 3935 pages); 3936 } 3937 3938 pplist = NULL; 3939 physcontig = 0; 3940 ppa[0] = NULL; 3941 if (!brkcow && !tron && szc && 3942 !page_exists_physcontig(vp, off, szc, 3943 segtype == MAP_PRIVATE ?
ppa : NULL)) { 3944 SEGVN_VMSTAT_FLTVNPAGES(9); 3945 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3946 szc, 0, 0) && type != F_SOFTLOCK) { 3947 SEGVN_VMSTAT_FLTVNPAGES(10); 3948 pszc = 0; 3949 ierr = -1; 3950 alloc_failed |= (1 << szc); 3951 break; 3952 } 3953 if (pplist != NULL && 3954 vp->v_mpssdata == SEGVN_PAGEIO) { 3955 int downsize; 3956 SEGVN_VMSTAT_FLTVNPAGES(11); 3957 physcontig = segvn_fill_vp_pages(svd, 3958 vp, off, szc, ppa, &pplist, 3959 &pszc, &downsize); 3960 ASSERT(!physcontig || pplist == NULL); 3961 if (!physcontig && downsize && 3962 type != F_SOFTLOCK) { 3963 ASSERT(pplist == NULL); 3964 SEGVN_VMSTAT_FLTVNPAGES(12); 3965 ierr = -1; 3966 break; 3967 } 3968 ASSERT(!physcontig || 3969 segtype == MAP_PRIVATE || 3970 ppa[0] == NULL); 3971 if (physcontig && ppa[0] == NULL) { 3972 physcontig = 0; 3973 } 3974 } 3975 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 3976 SEGVN_VMSTAT_FLTVNPAGES(13); 3977 ASSERT(segtype == MAP_PRIVATE); 3978 physcontig = 1; 3979 } 3980 3981 if (!physcontig) { 3982 SEGVN_VMSTAT_FLTVNPAGES(14); 3983 ppa[0] = NULL; 3984 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 3985 &vpprot, ppa, pgsz, seg, a, arw, 3986 svd->cred, NULL); 3987 #ifdef DEBUG 3988 if (ierr == 0) { 3989 for (i = 0; i < pages; i++) { 3990 ASSERT(PAGE_LOCKED(ppa[i])); 3991 ASSERT(!PP_ISFREE(ppa[i])); 3992 ASSERT(ppa[i]->p_vnode == vp); 3993 ASSERT(ppa[i]->p_offset == 3994 off + (i << PAGESHIFT)); 3995 } 3996 } 3997 #endif /* DEBUG */ 3998 if (segtype == MAP_PRIVATE) { 3999 SEGVN_VMSTAT_FLTVNPAGES(15); 4000 vpprot &= ~PROT_WRITE; 4001 } 4002 } else { 4003 ASSERT(segtype == MAP_PRIVATE); 4004 SEGVN_VMSTAT_FLTVNPAGES(16); 4005 vpprot = PROT_ALL & ~PROT_WRITE; 4006 ierr = 0; 4007 } 4008 4009 if (ierr != 0) { 4010 SEGVN_VMSTAT_FLTVNPAGES(17); 4011 if (pplist != NULL) { 4012 SEGVN_VMSTAT_FLTVNPAGES(18); 4013 page_free_replacement_page(pplist); 4014 page_create_putback(pages); 4015 } 4016 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4017 if (a + pgsz <= eaddr) { 4018 SEGVN_VMSTAT_FLTVNPAGES(19); 4019 err = FC_MAKE_ERR(ierr); 4020 goto out; 4021 } 4022 va.va_mask = AT_SIZE; 4023 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4024 SEGVN_VMSTAT_FLTVNPAGES(20); 4025 err = FC_MAKE_ERR(EIO); 4026 goto out; 4027 } 4028 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4029 SEGVN_VMSTAT_FLTVNPAGES(21); 4030 err = FC_MAKE_ERR(ierr); 4031 goto out; 4032 } 4033 if (btopr(va.va_size) < 4034 btopr(off + (eaddr - a))) { 4035 SEGVN_VMSTAT_FLTVNPAGES(22); 4036 err = FC_MAKE_ERR(ierr); 4037 goto out; 4038 } 4039 if (brkcow || tron || type == F_SOFTLOCK) { 4040 /* can't reduce map area */ 4041 SEGVN_VMSTAT_FLTVNPAGES(23); 4042 vop_size_err = 1; 4043 goto out; 4044 } 4045 SEGVN_VMSTAT_FLTVNPAGES(24); 4046 ASSERT(szc != 0); 4047 pszc = 0; 4048 ierr = -1; 4049 break; 4050 } 4051 4052 if (amp != NULL) { 4053 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4054 anon_array_enter(amp, aindx, &an_cookie); 4055 } 4056 if (amp != NULL && 4057 anon_get_ptr(amp->ahp, aindx) != NULL) { 4058 ulong_t taindx = P2ALIGN(aindx, maxpages); 4059 4060 SEGVN_VMSTAT_FLTVNPAGES(25); 4061 ASSERT(anon_pages(amp->ahp, taindx, 4062 maxpages) == maxpages); 4063 for (i = 0; i < pages; i++) { 4064 page_unlock(ppa[i]); 4065 } 4066 anon_array_exit(&an_cookie); 4067 ANON_LOCK_EXIT(&->a_rwlock); 4068 if (pplist != NULL) { 4069 page_free_replacement_page(pplist); 4070 page_create_putback(pages); 4071 } 4072 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4073 if (szc < seg->s_szc) { 4074 SEGVN_VMSTAT_FLTVNPAGES(26); 4075 /* 4076 * For private segments SOFTLOCK 
4077 * either always breaks cow (any rw
4078 * type except S_READ_NOCOW) or the
4079 * address space is locked as writer
4080 * (S_READ_NOCOW case) and anon slots
4081 * can't show up on the second check.
4082 * Therefore if we are here for the
4083 * SOFTLOCK case it must be a cow
4084 * break, but a cow break never reduces
4085 * szc. Text replication (tron) in
4086 * this case works as a cow break.
4087 * Thus the assert below.
4088 */
4089 ASSERT(!brkcow && !tron &&
4090 type != F_SOFTLOCK);
4091 pszc = seg->s_szc;
4092 ierr = -2;
4093 break;
4094 }
4095 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4096 goto again;
4097 }
4098 #ifdef DEBUG
4099 if (amp != NULL) {
4100 ulong_t taindx = P2ALIGN(aindx, maxpages);
4101 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4102 }
4103 #endif /* DEBUG */
4104
4105 if (brkcow || tron) {
4106 ASSERT(amp != NULL);
4107 ASSERT(pplist == NULL);
4108 ASSERT(szc == seg->s_szc);
4109 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4110 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4111 SEGVN_VMSTAT_FLTVNPAGES(27);
4112 ierr = anon_map_privatepages(amp, aindx, szc,
4113 seg, a, prot, ppa, vpage, segvn_anypgsz,
4114 tron ? PG_LOCAL : 0, svd->cred);
4115 if (ierr != 0) {
4116 SEGVN_VMSTAT_FLTVNPAGES(28);
4117 anon_array_exit(&an_cookie);
4118 ANON_LOCK_EXIT(&amp->a_rwlock);
4119 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4120 err = FC_MAKE_ERR(ierr);
4121 goto out;
4122 }
4123
4124 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4125 /*
4126 * p_szc can't be changed for locked
4127 * swapfs pages.
4128 */
4129 ASSERT(svd->rcookie ==
4130 HAT_INVALID_REGION_COOKIE);
4131 hat_memload_array(hat, a, pgsz, ppa, prot,
4132 hat_flag);
4133
4134 if (!(hat_flag & HAT_LOAD_LOCK)) {
4135 SEGVN_VMSTAT_FLTVNPAGES(29);
4136 for (i = 0; i < pages; i++) {
4137 page_unlock(ppa[i]);
4138 }
4139 }
4140 anon_array_exit(&an_cookie);
4141 ANON_LOCK_EXIT(&amp->a_rwlock);
4142 goto next;
4143 }
4144
4145 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4146 (!svd->pageprot && svd->prot == (prot & vpprot)));
4147
4148 pfn = page_pptonum(ppa[0]);
4149 /*
4150 * hat_page_demote() needs an SE_EXCL lock on one of
4151 * the constituent page_t's and it decreases the root's p_szc
4152 * last. This means that if the root's p_szc is equal to szc and
4153 * all its constituent pages are locked, any
4154 * hat_page_demote() that could have changed p_szc to
4155 * szc is already done and no new hat_page_demote()
4156 * can start for this large page.
4157 */
4158
4159 /*
4160 * We need to make sure the same mapping size is used for
4161 * the same address range if there's a possibility the
4162 * address is already mapped, because the hat layer panics
4163 * when a translation is loaded for a range already
4164 * mapped with a different page size. We achieve this
4165 * by always using the largest page size possible subject
4166 * to the constraints of page size, segment page size
4167 * and page alignment. Since mappings are invalidated
4168 * when those constraints change, making it
4169 * impossible to reuse the previously used mapping size, no
4170 * mapping size conflicts should happen.
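 * For example, if another segment has already mapped the same file
 * range with smaller pages, the checks below fall back to a smaller
 * mapping size here rather than load a large translation that would
 * conflict with the existing smaller ones.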
4171 */ 4172 4173 chkszc: 4174 if ((pszc = ppa[0]->p_szc) == szc && 4175 IS_P2ALIGNED(pfn, pages)) { 4176 4177 SEGVN_VMSTAT_FLTVNPAGES(30); 4178 #ifdef DEBUG 4179 for (i = 0; i < pages; i++) { 4180 ASSERT(PAGE_LOCKED(ppa[i])); 4181 ASSERT(!PP_ISFREE(ppa[i])); 4182 ASSERT(page_pptonum(ppa[i]) == 4183 pfn + i); 4184 ASSERT(ppa[i]->p_szc == szc); 4185 ASSERT(ppa[i]->p_vnode == vp); 4186 ASSERT(ppa[i]->p_offset == 4187 off + (i << PAGESHIFT)); 4188 } 4189 #endif /* DEBUG */ 4190 /* 4191 * All pages are of szc we need and they are 4192 * all locked so they can't change szc. load 4193 * translations. 4194 * 4195 * if page got promoted since last check 4196 * we don't need pplist. 4197 */ 4198 if (pplist != NULL) { 4199 page_free_replacement_page(pplist); 4200 page_create_putback(pages); 4201 } 4202 if (PP_ISMIGRATE(ppa[0])) { 4203 page_migrate(seg, a, ppa, pages); 4204 } 4205 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4206 prot, vpprot); 4207 if (!xhat) { 4208 hat_memload_array_region(hat, a, pgsz, 4209 ppa, prot & vpprot, hat_flag, 4210 svd->rcookie); 4211 } else { 4212 /* 4213 * avoid large xhat mappings to FS 4214 * pages so that hat_page_demote() 4215 * doesn't need to check for xhat 4216 * large mappings. 4217 * Don't use regions with xhats. 4218 */ 4219 for (i = 0; i < pages; i++) { 4220 hat_memload(hat, 4221 a + (i << PAGESHIFT), 4222 ppa[i], prot & vpprot, 4223 hat_flag); 4224 } 4225 } 4226 4227 if (!(hat_flag & HAT_LOAD_LOCK)) { 4228 for (i = 0; i < pages; i++) { 4229 page_unlock(ppa[i]); 4230 } 4231 } 4232 if (amp != NULL) { 4233 anon_array_exit(&an_cookie); 4234 ANON_LOCK_EXIT(&->a_rwlock); 4235 } 4236 goto next; 4237 } 4238 4239 /* 4240 * See if upsize is possible. 4241 */ 4242 if (pszc > szc && szc < seg->s_szc && 4243 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4244 pgcnt_t aphase; 4245 uint_t pszc1 = MIN(pszc, seg->s_szc); 4246 ppgsz = page_get_pagesize(pszc1); 4247 ppages = btop(ppgsz); 4248 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4249 4250 ASSERT(type != F_SOFTLOCK); 4251 4252 SEGVN_VMSTAT_FLTVNPAGES(31); 4253 if (aphase != P2PHASE(pfn, ppages)) { 4254 segvn_faultvnmpss_align_err4++; 4255 } else { 4256 SEGVN_VMSTAT_FLTVNPAGES(32); 4257 if (pplist != NULL) { 4258 page_t *pl = pplist; 4259 page_free_replacement_page(pl); 4260 page_create_putback(pages); 4261 } 4262 for (i = 0; i < pages; i++) { 4263 page_unlock(ppa[i]); 4264 } 4265 if (amp != NULL) { 4266 anon_array_exit(&an_cookie); 4267 ANON_LOCK_EXIT(&->a_rwlock); 4268 } 4269 pszc = pszc1; 4270 ierr = -2; 4271 break; 4272 } 4273 } 4274 4275 /* 4276 * check if we should use smallest mapping size. 4277 */ 4278 upgrdfail = 0; 4279 if (szc == 0 || xhat || 4280 (pszc >= szc && 4281 !IS_P2ALIGNED(pfn, pages)) || 4282 (pszc < szc && 4283 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4284 &pszc))) { 4285 4286 if (upgrdfail && type != F_SOFTLOCK) { 4287 /* 4288 * segvn_full_szcpages failed to lock 4289 * all pages EXCL. Size down. 
4290 */
4291 ASSERT(pszc < szc);
4292
4293 SEGVN_VMSTAT_FLTVNPAGES(33);
4294
4295 if (pplist != NULL) {
4296 page_t *pl = pplist;
4297 page_free_replacement_page(pl);
4298 page_create_putback(pages);
4299 }
4300
4301 for (i = 0; i < pages; i++) {
4302 page_unlock(ppa[i]);
4303 }
4304 if (amp != NULL) {
4305 anon_array_exit(&an_cookie);
4306 ANON_LOCK_EXIT(&amp->a_rwlock);
4307 }
4308 ierr = -1;
4309 break;
4310 }
4311 if (szc != 0 && !xhat && !upgrdfail) {
4312 segvn_faultvnmpss_align_err5++;
4313 }
4314 SEGVN_VMSTAT_FLTVNPAGES(34);
4315 if (pplist != NULL) {
4316 page_free_replacement_page(pplist);
4317 page_create_putback(pages);
4318 }
4319 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4320 prot, vpprot);
4321 if (upgrdfail && segvn_anypgsz_vnode) {
4322 /* SOFTLOCK case */
4323 hat_memload_array_region(hat, a, pgsz,
4324 ppa, prot & vpprot, hat_flag,
4325 svd->rcookie);
4326 } else {
4327 for (i = 0; i < pages; i++) {
4328 hat_memload_region(hat,
4329 a + (i << PAGESHIFT),
4330 ppa[i], prot & vpprot,
4331 hat_flag, svd->rcookie);
4332 }
4333 }
4334 if (!(hat_flag & HAT_LOAD_LOCK)) {
4335 for (i = 0; i < pages; i++) {
4336 page_unlock(ppa[i]);
4337 }
4338 }
4339 if (amp != NULL) {
4340 anon_array_exit(&an_cookie);
4341 ANON_LOCK_EXIT(&amp->a_rwlock);
4342 }
4343 goto next;
4344 }
4345
4346 if (pszc == szc) {
4347 /*
4348 * segvn_full_szcpages() upgraded the pages' szc.
4349 */
4350 ASSERT(pszc == ppa[0]->p_szc);
4351 ASSERT(IS_P2ALIGNED(pfn, pages));
4352 goto chkszc;
4353 }
4354
4355 if (pszc > szc) {
4356 kmutex_t *szcmtx;
4357 SEGVN_VMSTAT_FLTVNPAGES(35);
4358 /*
4359 * p_szc of ppa[0] can change since we haven't
4360 * locked all constituent pages. Call
4361 * page_szc_lock() to prevent szc changes.
4362 * This should be a rare case that happens when
4363 * multiple segments use a different page size
4364 * to map the same file offsets.
4365 */
4366 szcmtx = page_szc_lock(ppa[0]);
4367 pszc = ppa[0]->p_szc;
4368 ASSERT(szcmtx != NULL || pszc == 0);
4369 ASSERT(ppa[0]->p_szc <= pszc);
4370 if (pszc <= szc) {
4371 SEGVN_VMSTAT_FLTVNPAGES(36);
4372 if (szcmtx != NULL) {
4373 mutex_exit(szcmtx);
4374 }
4375 goto chkszc;
4376 }
4377 if (pplist != NULL) {
4378 /*
4379 * Page got promoted since the last check;
4380 * we don't need the preallocated large
4381 * page.
4382 */
4383 SEGVN_VMSTAT_FLTVNPAGES(37);
4384 page_free_replacement_page(pplist);
4385 page_create_putback(pages);
4386 }
4387 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4388 prot, vpprot);
4389 hat_memload_array_region(hat, a, pgsz, ppa,
4390 prot & vpprot, hat_flag, svd->rcookie);
4391 mutex_exit(szcmtx);
4392 if (!(hat_flag & HAT_LOAD_LOCK)) {
4393 for (i = 0; i < pages; i++) {
4394 page_unlock(ppa[i]);
4395 }
4396 }
4397 if (amp != NULL) {
4398 anon_array_exit(&an_cookie);
4399 ANON_LOCK_EXIT(&amp->a_rwlock);
4400 }
4401 goto next;
4402 }
4403
4404 /*
4405 * If the page got demoted since the last check
4406 * we may not have allocated a large page yet.
4407 * Allocate one now.
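 * If that allocation fails we size the mapping down (ierr == -1);
 * in the F_SOFTLOCK case, which can't shrink the fault range, we
 * fall through instead and load the existing smaller pages below.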
4408 */
4409 if (pplist == NULL &&
4410 page_alloc_pages(vp, seg, a, &pplist, NULL,
4411 szc, 0, 0) && type != F_SOFTLOCK) {
4412 SEGVN_VMSTAT_FLTVNPAGES(38);
4413 for (i = 0; i < pages; i++) {
4414 page_unlock(ppa[i]);
4415 }
4416 if (amp != NULL) {
4417 anon_array_exit(&an_cookie);
4418 ANON_LOCK_EXIT(&amp->a_rwlock);
4419 }
4420 ierr = -1;
4421 alloc_failed |= (1 << szc);
4422 break;
4423 }
4424
4425 SEGVN_VMSTAT_FLTVNPAGES(39);
4426
4427 if (pplist != NULL) {
4428 segvn_relocate_pages(ppa, pplist);
4429 #ifdef DEBUG
4430 } else {
4431 ASSERT(type == F_SOFTLOCK);
4432 SEGVN_VMSTAT_FLTVNPAGES(40);
4433 #endif /* DEBUG */
4434 }
4435
4436 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4437
4438 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4439 ASSERT(type == F_SOFTLOCK);
4440 for (i = 0; i < pages; i++) {
4441 ASSERT(ppa[i]->p_szc < szc);
4442 hat_memload_region(hat,
4443 a + (i << PAGESHIFT),
4444 ppa[i], prot & vpprot, hat_flag,
4445 svd->rcookie);
4446 }
4447 } else {
4448 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4449 hat_memload_array_region(hat, a, pgsz, ppa,
4450 prot & vpprot, hat_flag, svd->rcookie);
4451 }
4452 if (!(hat_flag & HAT_LOAD_LOCK)) {
4453 for (i = 0; i < pages; i++) {
4454 ASSERT(PAGE_SHARED(ppa[i]));
4455 page_unlock(ppa[i]);
4456 }
4457 }
4458 if (amp != NULL) {
4459 anon_array_exit(&an_cookie);
4460 ANON_LOCK_EXIT(&amp->a_rwlock);
4461 }
4462
4463 next:
4464 if (vpage != NULL) {
4465 vpage += pages;
4466 }
4467 adjszc_chk = 1;
4468 }
4469 if (a == lpgeaddr)
4470 break;
4471 ASSERT(a < lpgeaddr);
4472
4473 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4474
4475 /*
4476 * ierr == -1 means we failed to map with a large page
4477 * (either due to allocation/relocation failures or
4478 * misalignment with other mappings to this file).
4479 *
4480 * ierr == -2 means some other thread allocated a large page
4481 * after we gave up trying to map with a large page. Retry
4482 * with a larger mapping.
4483 */
4484 ASSERT(ierr == -1 || ierr == -2);
4485 ASSERT(ierr == -2 || szc != 0);
4486 ASSERT(ierr == -1 || szc < seg->s_szc);
4487 if (ierr == -2) {
4488 SEGVN_VMSTAT_FLTVNPAGES(41);
4489 ASSERT(pszc > szc && pszc <= seg->s_szc);
4490 szc = pszc;
4491 } else if (segvn_anypgsz_vnode) {
4492 SEGVN_VMSTAT_FLTVNPAGES(42);
4493 szc--;
4494 } else {
4495 SEGVN_VMSTAT_FLTVNPAGES(43);
4496 ASSERT(pszc < szc);
4497 /*
4498 * Another process created a pszc large page,
4499 * but we still have to drop to szc 0.
4500 */
4501 szc = 0;
4502 }
4503
4504 pgsz = page_get_pagesize(szc);
4505 pages = btop(pgsz);
4506 if (ierr == -2) {
4507 /*
4508 * Size up case. Note lpgaddr may only be needed for
4509 * softlock case so we don't adjust it here.
4510 */
4511 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4512 ASSERT(a >= lpgaddr);
4513 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4514 off = svd->offset + (uintptr_t)(a - seg->s_base);
4515 aindx = svd->anon_index + seg_page(seg, a);
4516 vpage = (svd->vpage != NULL) ?
4517 &svd->vpage[seg_page(seg, a)] : NULL;
4518 } else {
4519 /*
4520 * Size down case. Note lpgaddr may only be needed for
4521 * softlock case so we don't adjust it here.
4522 */
4523 ASSERT(IS_P2ALIGNED(a, pgsz));
4524 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4525 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4526 ASSERT(a < lpgeaddr);
4527 if (a < addr) {
4528 SEGVN_VMSTAT_FLTVNPAGES(44);
4529 /*
4530 * The beginning of the large page region can
4531 * be pulled to the right to make a smaller
4532 * region. We haven't yet faulted a single
4533 * page.
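 * For example, if addr is 0x12345000 and the new (smaller) pgsz is
 * 64K, a becomes P2ALIGN(addr, pgsz) == 0x12340000; this still
 * satisfies the a >= lpgaddr assertion below because lpgaddr was
 * aligned down with the larger maxpgsz.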
4534 */ 4535 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4536 ASSERT(a >= lpgaddr); 4537 off = svd->offset + 4538 (uintptr_t)(a - seg->s_base); 4539 aindx = svd->anon_index + seg_page(seg, a); 4540 vpage = (svd->vpage != NULL) ? 4541 &svd->vpage[seg_page(seg, a)] : NULL; 4542 } 4543 } 4544 } 4545 out: 4546 kmem_free(ppa, ppasize); 4547 if (!err && !vop_size_err) { 4548 SEGVN_VMSTAT_FLTVNPAGES(45); 4549 return (0); 4550 } 4551 if (type == F_SOFTLOCK && a > lpgaddr) { 4552 SEGVN_VMSTAT_FLTVNPAGES(46); 4553 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4554 } 4555 if (!vop_size_err) { 4556 SEGVN_VMSTAT_FLTVNPAGES(47); 4557 return (err); 4558 } 4559 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4560 /* 4561 * Large page end is mapped beyond the end of file and it's a cow 4562 * fault (can be a text replication induced cow) or softlock so we can't 4563 * reduce the map area. For now just demote the segment. This should 4564 * really only happen if the end of the file changed after the mapping 4565 * was established since when large page segments are created we make 4566 * sure they don't extend beyond the end of the file. 4567 */ 4568 SEGVN_VMSTAT_FLTVNPAGES(48); 4569 4570 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4571 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4572 err = 0; 4573 if (seg->s_szc != 0) { 4574 segvn_fltvnpages_clrszc_cnt++; 4575 ASSERT(svd->softlockcnt == 0); 4576 err = segvn_clrszc(seg); 4577 if (err != 0) { 4578 segvn_fltvnpages_clrszc_err++; 4579 } 4580 } 4581 ASSERT(err || seg->s_szc == 0); 4582 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4583 /* segvn_fault will do its job as if szc had been zero to begin with */ 4584 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4585 } 4586 4587 /* 4588 * This routine will attempt to fault in one large page. 4589 * it will use smaller pages if that fails. 4590 * It should only be called for pure anonymous segments. 4591 */ 4592 static faultcode_t 4593 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4594 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4595 caddr_t eaddr, int brkcow) 4596 { 4597 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4598 struct anon_map *amp = svd->amp; 4599 uchar_t segtype = svd->type; 4600 uint_t szc = seg->s_szc; 4601 size_t pgsz = page_get_pagesize(szc); 4602 size_t maxpgsz = pgsz; 4603 pgcnt_t pages = btop(pgsz); 4604 uint_t ppaszc = szc; 4605 caddr_t a = lpgaddr; 4606 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4607 struct vpage *vpage = (svd->vpage != NULL) ? 4608 &svd->vpage[seg_page(seg, a)] : NULL; 4609 page_t **ppa; 4610 uint_t ppa_szc; 4611 faultcode_t err; 4612 int ierr; 4613 uint_t protchk, prot, vpprot; 4614 ulong_t i; 4615 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4616 anon_sync_obj_t cookie; 4617 int adjszc_chk; 4618 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4619 4620 ASSERT(szc != 0); 4621 ASSERT(amp != NULL); 4622 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4623 ASSERT(!(svd->flags & MAP_NORESERVE)); 4624 ASSERT(type != F_SOFTUNLOCK); 4625 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4626 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4627 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4628 4629 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4630 4631 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4632 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4633 4634 if (svd->flags & MAP_TEXT) { 4635 hat_flag |= HAT_LOAD_TEXT; 4636 } 4637 4638 if (svd->pageprot) { 4639 switch (rw) { 4640 case S_READ: 4641 protchk = PROT_READ; 4642 break; 4643 case S_WRITE: 4644 protchk = PROT_WRITE; 4645 break; 4646 case S_EXEC: 4647 protchk = PROT_EXEC; 4648 break; 4649 case S_OTHER: 4650 default: 4651 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4652 break; 4653 } 4654 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4655 } else { 4656 prot = svd->prot; 4657 /* caller has already done segment level protection check. */ 4658 } 4659 4660 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4661 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4662 for (;;) { 4663 adjszc_chk = 0; 4664 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4665 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4666 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4667 ASSERT(vpage != NULL); 4668 prot = VPP_PROT(vpage); 4669 ASSERT(sameprot(seg, a, maxpgsz)); 4670 if ((prot & protchk) == 0) { 4671 err = FC_PROT; 4672 goto error; 4673 } 4674 } 4675 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4676 pgsz < maxpgsz) { 4677 ASSERT(a > lpgaddr); 4678 szc = seg->s_szc; 4679 pgsz = maxpgsz; 4680 pages = btop(pgsz); 4681 ASSERT(IS_P2ALIGNED(aindx, pages)); 4682 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4683 pgsz); 4684 } 4685 if (type == F_SOFTLOCK) { 4686 atomic_add_long((ulong_t *)&svd->softlockcnt, 4687 pages); 4688 } 4689 anon_array_enter(amp, aindx, &cookie); 4690 ppa_szc = (uint_t)-1; 4691 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4692 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4693 segvn_anypgsz, pgflags, svd->cred); 4694 if (ierr != 0) { 4695 anon_array_exit(&cookie); 4696 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4697 if (type == F_SOFTLOCK) { 4698 atomic_add_long( 4699 (ulong_t *)&svd->softlockcnt, 4700 -pages); 4701 } 4702 if (ierr > 0) { 4703 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4704 err = FC_MAKE_ERR(ierr); 4705 goto error; 4706 } 4707 break; 4708 } 4709 4710 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4711 4712 ASSERT(segtype == MAP_SHARED || 4713 ppa[0]->p_szc <= szc); 4714 ASSERT(segtype == MAP_PRIVATE || 4715 ppa[0]->p_szc >= szc); 4716 4717 /* 4718 * Handle pages that have been marked for migration 4719 */ 4720 if (lgrp_optimizations()) 4721 page_migrate(seg, a, ppa, pages); 4722 4723 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4724 4725 if (segtype == MAP_SHARED) { 4726 vpprot |= PROT_WRITE; 4727 } 4728 4729 hat_memload_array(hat, a, pgsz, ppa, 4730 prot & vpprot, hat_flag); 4731 4732 if (hat_flag & HAT_LOAD_LOCK) { 4733 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4734 } else { 4735 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4736 for (i = 0; i < pages; i++) 4737 page_unlock(ppa[i]); 4738 } 4739 if (vpage != NULL) 4740 vpage += pages; 4741 4742 anon_array_exit(&cookie); 4743 adjszc_chk = 1; 4744 } 4745 if (a == lpgeaddr) 4746 break; 4747 ASSERT(a < lpgeaddr); 4748 /* 4749 * ierr == -1 means we failed to 
allocate a large page. 4750 * so do a size down operation. 4751 * 4752 * ierr == -2 means some other process that privately shares 4753 * pages with this process has allocated a larger page and we 4754 * need to retry with larger pages. So do a size up 4755 * operation. This relies on the fact that large pages are 4756 * never partially shared i.e. if we share any constituent 4757 * page of a large page with another process we must share the 4758 * entire large page. Note this cannot happen for SOFTLOCK 4759 * case, unless current address (a) is at the beginning of the 4760 * next page size boundary because the other process couldn't 4761 * have relocated locked pages. 4762 */ 4763 ASSERT(ierr == -1 || ierr == -2); 4764 4765 if (segvn_anypgsz) { 4766 ASSERT(ierr == -2 || szc != 0); 4767 ASSERT(ierr == -1 || szc < seg->s_szc); 4768 szc = (ierr == -1) ? szc - 1 : szc + 1; 4769 } else { 4770 /* 4771 * For non COW faults and segvn_anypgsz == 0 4772 * we need to be careful not to loop forever 4773 * if existing page is found with szc other 4774 * than 0 or seg->s_szc. This could be due 4775 * to page relocations on behalf of DR or 4776 * more likely large page creation. For this 4777 * case simply re-size to existing page's szc 4778 * if returned by anon_map_getpages(). 4779 */ 4780 if (ppa_szc == (uint_t)-1) { 4781 szc = (ierr == -1) ? 0 : seg->s_szc; 4782 } else { 4783 ASSERT(ppa_szc <= seg->s_szc); 4784 ASSERT(ierr == -2 || ppa_szc < szc); 4785 ASSERT(ierr == -1 || ppa_szc > szc); 4786 szc = ppa_szc; 4787 } 4788 } 4789 4790 pgsz = page_get_pagesize(szc); 4791 pages = btop(pgsz); 4792 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4793 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4794 if (type == F_SOFTLOCK) { 4795 /* 4796 * For softlocks we cannot reduce the fault area 4797 * (calculated based on the largest page size for this 4798 * segment) for size down and a is already next 4799 * page size aligned as assertted above for size 4800 * ups. Therefore just continue in case of softlock. 4801 */ 4802 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4803 continue; /* keep lint happy */ 4804 } else if (ierr == -2) { 4805 4806 /* 4807 * Size up case. Note lpgaddr may only be needed for 4808 * softlock case so we don't adjust it here. 4809 */ 4810 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4811 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4812 ASSERT(a >= lpgaddr); 4813 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4814 aindx = svd->anon_index + seg_page(seg, a); 4815 vpage = (svd->vpage != NULL) ? 4816 &svd->vpage[seg_page(seg, a)] : NULL; 4817 } else { 4818 /* 4819 * Size down case. Note lpgaddr may only be needed for 4820 * softlock case so we don't adjust it here. 4821 */ 4822 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4823 ASSERT(IS_P2ALIGNED(a, pgsz)); 4824 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4825 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4826 ASSERT(a < lpgeaddr); 4827 if (a < addr) { 4828 /* 4829 * The beginning of the large page region can 4830 * be pulled to the right to make a smaller 4831 * region. We haven't yet faulted a single 4832 * page. 4833 */ 4834 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4835 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4836 ASSERT(a >= lpgaddr); 4837 aindx = svd->anon_index + seg_page(seg, a); 4838 vpage = (svd->vpage != NULL) ? 
4839 &svd->vpage[seg_page(seg, a)] : NULL; 4840 } 4841 } 4842 } 4843 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4844 ANON_LOCK_EXIT(&->a_rwlock); 4845 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4846 return (0); 4847 error: 4848 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4849 ANON_LOCK_EXIT(&->a_rwlock); 4850 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4851 if (type == F_SOFTLOCK && a > lpgaddr) { 4852 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4853 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4854 } 4855 return (err); 4856 } 4857 4858 int fltadvice = 1; /* set to free behind pages for sequential access */ 4859 4860 /* 4861 * This routine is called via a machine specific fault handling routine. 4862 * It is also called by software routines wishing to lock or unlock 4863 * a range of addresses. 4864 * 4865 * Here is the basic algorithm: 4866 * If unlocking 4867 * Call segvn_softunlock 4868 * Return 4869 * endif 4870 * Checking and set up work 4871 * If we will need some non-anonymous pages 4872 * Call VOP_GETPAGE over the range of non-anonymous pages 4873 * endif 4874 * Loop over all addresses requested 4875 * Call segvn_faultpage passing in page list 4876 * to load up translations and handle anonymous pages 4877 * endloop 4878 * Load up translation to any additional pages in page list not 4879 * already handled that fit into this segment 4880 */ 4881 static faultcode_t 4882 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4883 enum fault_type type, enum seg_rw rw) 4884 { 4885 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4886 page_t **plp, **ppp, *pp; 4887 u_offset_t off; 4888 caddr_t a; 4889 struct vpage *vpage; 4890 uint_t vpprot, prot; 4891 int err; 4892 page_t *pl[PVN_GETPAGE_NUM + 1]; 4893 size_t plsz, pl_alloc_sz; 4894 size_t page; 4895 ulong_t anon_index; 4896 struct anon_map *amp; 4897 int dogetpage = 0; 4898 caddr_t lpgaddr, lpgeaddr; 4899 size_t pgsz; 4900 anon_sync_obj_t cookie; 4901 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4902 4903 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4904 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4905 4906 /* 4907 * First handle the easy stuff 4908 */ 4909 if (type == F_SOFTUNLOCK) { 4910 if (rw == S_READ_NOCOW) { 4911 rw = S_READ; 4912 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4913 } 4914 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4915 pgsz = (seg->s_szc == 0) ? 
PAGESIZE : 4916 page_get_pagesize(seg->s_szc); 4917 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4918 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4919 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4920 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4921 return (0); 4922 } 4923 4924 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4925 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4926 if (brkcow == 0) { 4927 if (svd->tr_state == SEGVN_TR_INIT) { 4928 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4929 if (svd->tr_state == SEGVN_TR_INIT) { 4930 ASSERT(svd->vp != NULL && svd->amp == NULL); 4931 ASSERT(svd->flags & MAP_TEXT); 4932 ASSERT(svd->type == MAP_PRIVATE); 4933 segvn_textrepl(seg); 4934 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4935 ASSERT(svd->tr_state != SEGVN_TR_ON || 4936 svd->amp != NULL); 4937 } 4938 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4939 } 4940 } else if (svd->tr_state != SEGVN_TR_OFF) { 4941 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4942 4943 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4944 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4945 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4946 return (FC_PROT); 4947 } 4948 4949 if (svd->tr_state == SEGVN_TR_ON) { 4950 ASSERT(svd->vp != NULL && svd->amp != NULL); 4951 segvn_textunrepl(seg, 0); 4952 ASSERT(svd->amp == NULL && 4953 svd->tr_state == SEGVN_TR_OFF); 4954 } else if (svd->tr_state != SEGVN_TR_OFF) { 4955 svd->tr_state = SEGVN_TR_OFF; 4956 } 4957 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4958 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4959 } 4960 4961 top: 4962 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4963 4964 /* 4965 * If we have the same protections for the entire segment, 4966 * insure that the access being attempted is legitimate. 4967 */ 4968 4969 if (svd->pageprot == 0) { 4970 uint_t protchk; 4971 4972 switch (rw) { 4973 case S_READ: 4974 case S_READ_NOCOW: 4975 protchk = PROT_READ; 4976 break; 4977 case S_WRITE: 4978 protchk = PROT_WRITE; 4979 break; 4980 case S_EXEC: 4981 protchk = PROT_EXEC; 4982 break; 4983 case S_OTHER: 4984 default: 4985 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4986 break; 4987 } 4988 4989 if ((svd->prot & protchk) == 0) { 4990 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4991 return (FC_PROT); /* illegal access type */ 4992 } 4993 } 4994 4995 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4996 /* this must be SOFTLOCK S_READ fault */ 4997 ASSERT(svd->amp == NULL); 4998 ASSERT(svd->tr_state == SEGVN_TR_OFF); 4999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5000 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5001 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5002 /* 5003 * this must be the first ever non S_READ_NOCOW 5004 * softlock for this segment. 5005 */ 5006 ASSERT(svd->softlockcnt == 0); 5007 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5008 HAT_REGION_TEXT); 5009 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5010 } 5011 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5012 goto top; 5013 } 5014 5015 /* 5016 * We can't allow the long term use of softlocks for vmpss segments, 5017 * because in some file truncation cases we should be able to demote 5018 * the segment, which requires that there are no softlocks. The 5019 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5020 * segment is S_READ_NOCOW, where the caller holds the address space 5021 * locked as writer and calls softunlock before dropping the as lock. 5022 * S_READ_NOCOW is used by /proc to read memory from another user. 
5023 * 5024 * Another deadlock between SOFTLOCK and file truncation can happen 5025 * because segvn_fault_vnodepages() calls the FS one pagesize at 5026 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5027 * can cause a deadlock because the first set of page_t's remain 5028 * locked SE_SHARED. To avoid this, we demote segments on a first 5029 * SOFTLOCK if they have a length greater than the segment's 5030 * page size. 5031 * 5032 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5033 * the access type is S_READ_NOCOW and the fault length is less than 5034 * or equal to the segment's page size. While this is quite restrictive, 5035 * it should be the most common case of SOFTLOCK against a vmpss 5036 * segment. 5037 * 5038 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5039 * caller makes sure no COW will be caused by another thread for a 5040 * softlocked page. 5041 */ 5042 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5043 int demote = 0; 5044 5045 if (rw != S_READ_NOCOW) { 5046 demote = 1; 5047 } 5048 if (!demote && len > PAGESIZE) { 5049 pgsz = page_get_pagesize(seg->s_szc); 5050 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5051 lpgeaddr); 5052 if (lpgeaddr - lpgaddr > pgsz) { 5053 demote = 1; 5054 } 5055 } 5056 5057 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5058 5059 if (demote) { 5060 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5061 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5062 if (seg->s_szc != 0) { 5063 segvn_vmpss_clrszc_cnt++; 5064 ASSERT(svd->softlockcnt == 0); 5065 err = segvn_clrszc(seg); 5066 if (err) { 5067 segvn_vmpss_clrszc_err++; 5068 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5069 return (FC_MAKE_ERR(err)); 5070 } 5071 } 5072 ASSERT(seg->s_szc == 0); 5073 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5074 goto top; 5075 } 5076 } 5077 5078 /* 5079 * Check to see if we need to allocate an anon_map structure. 5080 */ 5081 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5082 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5083 /* 5084 * Drop the "read" lock on the segment and acquire 5085 * the "write" version since we have to allocate the 5086 * anon_map. 5087 */ 5088 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5089 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5090 5091 if (svd->amp == NULL) { 5092 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5093 svd->amp->a_szc = seg->s_szc; 5094 } 5095 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5096 5097 /* 5098 * Start all over again since segment protections 5099 * may have changed after we dropped the "read" lock. 5100 */ 5101 goto top; 5102 } 5103 5104 /* 5105 * S_READ_NOCOW vs S_READ distinction was 5106 * only needed for the code above. After 5107 * that we treat it as S_READ. 5108 */ 5109 if (rw == S_READ_NOCOW) { 5110 ASSERT(type == F_SOFTLOCK); 5111 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5112 rw = S_READ; 5113 } 5114 5115 amp = svd->amp; 5116 5117 /* 5118 * MADV_SEQUENTIAL work is ignored for large page segments. 
5119 */ 5120 if (seg->s_szc != 0) { 5121 pgsz = page_get_pagesize(seg->s_szc); 5122 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5123 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5124 if (svd->vp == NULL) { 5125 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5126 lpgeaddr, type, rw, addr, addr + len, brkcow); 5127 } else { 5128 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5129 lpgeaddr, type, rw, addr, addr + len, brkcow); 5130 if (err == IE_RETRY) { 5131 ASSERT(seg->s_szc == 0); 5132 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5133 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5134 goto top; 5135 } 5136 } 5137 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5138 return (err); 5139 } 5140 5141 page = seg_page(seg, addr); 5142 if (amp != NULL) { 5143 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5144 anon_index = svd->anon_index + page; 5145 5146 if (type == F_PROT && rw == S_READ && 5147 svd->tr_state == SEGVN_TR_OFF && 5148 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5149 size_t index = anon_index; 5150 struct anon *ap; 5151 5152 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5153 /* 5154 * The fast path could apply to S_WRITE also, except 5155 * that the protection fault could be caused by lazy 5156 * tlb flush when ro->rw. In this case, the pte is 5157 * RW already. But RO in the other cpu's tlb causes 5158 * the fault. Since hat_chgprot won't do anything if 5159 * pte doesn't change, we may end up faulting 5160 * indefinitely until the RO tlb entry gets replaced. 5161 */ 5162 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5163 anon_array_enter(amp, index, &cookie); 5164 ap = anon_get_ptr(amp->ahp, index); 5165 anon_array_exit(&cookie); 5166 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5167 ANON_LOCK_EXIT(&->a_rwlock); 5168 goto slow; 5169 } 5170 } 5171 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5172 ANON_LOCK_EXIT(&->a_rwlock); 5173 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5174 return (0); 5175 } 5176 } 5177 slow: 5178 5179 if (svd->vpage == NULL) 5180 vpage = NULL; 5181 else 5182 vpage = &svd->vpage[page]; 5183 5184 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5185 5186 /* 5187 * If MADV_SEQUENTIAL has been set for the particular page we 5188 * are faulting on, free behind all pages in the segment and put 5189 * them on the free list. 
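 * For example, once a MADV_SEQUENTIAL mapping faults on page N, the
 * loop below walks backwards from page N - 1 issuing asynchronous
 * VOP_PUTPAGE(..., B_DONTNEED | B_FREE | B_ASYNC) calls so the pages
 * that have already been read can be reclaimed.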
5190 */ 5191 5192 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5193 struct vpage *vpp; 5194 ulong_t fanon_index; 5195 size_t fpage; 5196 u_offset_t pgoff, fpgoff; 5197 struct vnode *fvp; 5198 struct anon *fap = NULL; 5199 5200 if (svd->advice == MADV_SEQUENTIAL || 5201 (svd->pageadvice && 5202 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5203 pgoff = off - PAGESIZE; 5204 fpage = page - 1; 5205 if (vpage != NULL) 5206 vpp = &svd->vpage[fpage]; 5207 if (amp != NULL) 5208 fanon_index = svd->anon_index + fpage; 5209 5210 while (pgoff > svd->offset) { 5211 if (svd->advice != MADV_SEQUENTIAL && 5212 (!svd->pageadvice || (vpage && 5213 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5214 break; 5215 5216 /* 5217 * If this is an anon page, we must find the 5218 * correct <vp, offset> for it 5219 */ 5220 fap = NULL; 5221 if (amp != NULL) { 5222 ANON_LOCK_ENTER(&->a_rwlock, 5223 RW_READER); 5224 anon_array_enter(amp, fanon_index, 5225 &cookie); 5226 fap = anon_get_ptr(amp->ahp, 5227 fanon_index); 5228 if (fap != NULL) { 5229 swap_xlate(fap, &fvp, &fpgoff); 5230 } else { 5231 fpgoff = pgoff; 5232 fvp = svd->vp; 5233 } 5234 anon_array_exit(&cookie); 5235 ANON_LOCK_EXIT(&->a_rwlock); 5236 } else { 5237 fpgoff = pgoff; 5238 fvp = svd->vp; 5239 } 5240 if (fvp == NULL) 5241 break; /* XXX */ 5242 /* 5243 * Skip pages that are free or have an 5244 * "exclusive" lock. 5245 */ 5246 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5247 if (pp == NULL) 5248 break; 5249 /* 5250 * We don't need the page_struct_lock to test 5251 * as this is only advisory; even if we 5252 * acquire it someone might race in and lock 5253 * the page after we unlock and before the 5254 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5255 */ 5256 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5257 /* 5258 * Hold the vnode before releasing 5259 * the page lock to prevent it from 5260 * being freed and re-used by some 5261 * other thread. 5262 */ 5263 VN_HOLD(fvp); 5264 page_unlock(pp); 5265 /* 5266 * We should build a page list 5267 * to kluster putpages XXX 5268 */ 5269 (void) VOP_PUTPAGE(fvp, 5270 (offset_t)fpgoff, PAGESIZE, 5271 (B_DONTNEED|B_FREE|B_ASYNC), 5272 svd->cred, NULL); 5273 VN_RELE(fvp); 5274 } else { 5275 /* 5276 * XXX - Should the loop terminate if 5277 * the page is `locked'? 5278 */ 5279 page_unlock(pp); 5280 } 5281 --vpp; 5282 --fanon_index; 5283 pgoff -= PAGESIZE; 5284 } 5285 } 5286 } 5287 5288 plp = pl; 5289 *plp = NULL; 5290 pl_alloc_sz = 0; 5291 5292 /* 5293 * See if we need to call VOP_GETPAGE for 5294 * *any* of the range being faulted on. 5295 * We can skip all of this work if there 5296 * was no original vnode. 5297 */ 5298 if (svd->vp != NULL) { 5299 u_offset_t vp_off; 5300 size_t vp_len; 5301 struct anon *ap; 5302 vnode_t *vp; 5303 5304 vp_off = off; 5305 vp_len = len; 5306 5307 if (amp == NULL) 5308 dogetpage = 1; 5309 else { 5310 /* 5311 * Only acquire reader lock to prevent amp->ahp 5312 * from being changed. 
It's ok to miss pages, 5313 * hence we don't do anon_array_enter 5314 */ 5315 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5316 ap = anon_get_ptr(amp->ahp, anon_index); 5317 5318 if (len <= PAGESIZE) 5319 /* inline non_anon() */ 5320 dogetpage = (ap == NULL); 5321 else 5322 dogetpage = non_anon(amp->ahp, anon_index, 5323 &vp_off, &vp_len); 5324 ANON_LOCK_EXIT(&->a_rwlock); 5325 } 5326 5327 if (dogetpage) { 5328 enum seg_rw arw; 5329 struct as *as = seg->s_as; 5330 5331 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5332 /* 5333 * Page list won't fit in local array, 5334 * allocate one of the needed size. 5335 */ 5336 pl_alloc_sz = 5337 (btop(len) + 1) * sizeof (page_t *); 5338 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5339 plp[0] = NULL; 5340 plsz = len; 5341 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5342 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5343 (((size_t)(addr + PAGESIZE) < 5344 (size_t)(seg->s_base + seg->s_size)) && 5345 hat_probe(as->a_hat, addr + PAGESIZE))) { 5346 /* 5347 * Ask VOP_GETPAGE to return the exact number 5348 * of pages if 5349 * (a) this is a COW fault, or 5350 * (b) this is a software fault, or 5351 * (c) next page is already mapped. 5352 */ 5353 plsz = len; 5354 } else { 5355 /* 5356 * Ask VOP_GETPAGE to return adjacent pages 5357 * within the segment. 5358 */ 5359 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5360 ((seg->s_base + seg->s_size) - addr)); 5361 ASSERT((addr + plsz) <= 5362 (seg->s_base + seg->s_size)); 5363 } 5364 5365 /* 5366 * Need to get some non-anonymous pages. 5367 * We need to make only one call to GETPAGE to do 5368 * this to prevent certain deadlocking conditions 5369 * when we are doing locking. In this case 5370 * non_anon() should have picked up the smallest 5371 * range which includes all the non-anonymous 5372 * pages in the requested range. We have to 5373 * be careful regarding which rw flag to pass in 5374 * because on a private mapping, the underlying 5375 * object is never allowed to be written. 5376 */ 5377 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5378 arw = S_READ; 5379 } else { 5380 arw = rw; 5381 } 5382 vp = svd->vp; 5383 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5384 "segvn_getpage:seg %p addr %p vp %p", 5385 seg, addr, vp); 5386 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5387 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5388 svd->cred, NULL); 5389 if (err) { 5390 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5391 segvn_pagelist_rele(plp); 5392 if (pl_alloc_sz) 5393 kmem_free(plp, pl_alloc_sz); 5394 return (FC_MAKE_ERR(err)); 5395 } 5396 if (svd->type == MAP_PRIVATE) 5397 vpprot &= ~PROT_WRITE; 5398 } 5399 } 5400 5401 /* 5402 * N.B. at this time the plp array has all the needed non-anon 5403 * pages in addition to (possibly) having some adjacent pages. 5404 */ 5405 5406 /* 5407 * Always acquire the anon_array_lock to prevent 5408 * 2 threads from allocating separate anon slots for 5409 * the same "addr". 5410 * 5411 * If this is a copy-on-write fault and we don't already 5412 * have the anon_array_lock, acquire it to prevent the 5413 * fault routine from handling multiple copy-on-write faults 5414 * on the same "addr" in the same address space. 5415 * 5416 * Only one thread should deal with the fault since after 5417 * it is handled, the other threads can acquire a translation 5418 * to the newly created private page. This prevents two or 5419 * more threads from creating different private pages for the 5420 * same fault. 
5421 *
5422 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5423 * to prevent deadlock between this thread and another thread
5424 * which has soft-locked this page and wants to acquire serial_lock.
5425 * (bug 4026339)
5426 *
5427 * The fix for bug 4026339 becomes unnecessary when using the
5428 * locking scheme with per amp rwlock and a global set of hash
5429 * locks, anon_array_lock. If we steal a vnode page when low
5430 * on memory and upgrade the page lock through page_rename,
5431 * then the page is PAGE_HANDLED and nothing needs to be done
5432 * for this page after returning from segvn_faultpage.
5433 *
5434 * But really, the page lock should be downgraded after
5435 * the stolen page is page_rename'd.
5436 */
5437
5438 if (amp != NULL)
5439 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5440
5441 /*
5442 * Ok, now loop over the address range and handle faults
5443 */
5444 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5445 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5446 type, rw, brkcow);
5447 if (err) {
5448 if (amp != NULL)
5449 ANON_LOCK_EXIT(&amp->a_rwlock);
5450 if (type == F_SOFTLOCK && a > addr) {
5451 segvn_softunlock(seg, addr, (a - addr),
5452 S_OTHER);
5453 }
5454 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5455 segvn_pagelist_rele(plp);
5456 if (pl_alloc_sz)
5457 kmem_free(plp, pl_alloc_sz);
5458 return (err);
5459 }
5460 if (vpage) {
5461 vpage++;
5462 } else if (svd->vpage) {
5463 page = seg_page(seg, addr);
5464 vpage = &svd->vpage[++page];
5465 }
5466 }
5467
5468 /* Didn't get pages from the underlying fs so we're done */
5469 if (!dogetpage)
5470 goto done;
5471
5472 /*
5473 * Now handle any other pages in the list returned.
5474 * If the page can be used, load up the translations now.
5475 * Note that the for loop will only be entered if "plp"
5476 * is pointing to a non-NULL page pointer which means that
5477 * VOP_GETPAGE() was called and vpprot has been initialized.
5478 */
5479 if (svd->pageprot == 0)
5480 prot = svd->prot & vpprot;
5481
5482
5483 /*
5484 * Large Files: diff should be an unsigned value because we started
5485 * supporting > 2GB segment sizes from 2.5.1, and when a
5486 * large file of size > 2GB gets mapped to the address space
5487 * the diff value can be > 2GB.
5488 */
5489
5490 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5491 size_t diff;
5492 struct anon *ap;
5493 int anon_index;
5494 anon_sync_obj_t cookie;
5495 int hat_flag = HAT_LOAD_ADV;
5496
5497 if (svd->flags & MAP_TEXT) {
5498 hat_flag |= HAT_LOAD_TEXT;
5499 }
5500
5501 if (pp == PAGE_HANDLED)
5502 continue;
5503
5504 if (svd->tr_state != SEGVN_TR_ON &&
5505 pp->p_offset >= svd->offset &&
5506 pp->p_offset < svd->offset + seg->s_size) {
5507
5508 diff = pp->p_offset - svd->offset;
5509
5510 /*
5511 * Large Files: Following is the assertion
5512 * validating the above cast.
5513 */
5514 ASSERT(svd->vp == pp->p_vnode);
5515
5516 page = btop(diff);
5517 if (svd->pageprot)
5518 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5519
5520 /*
5521 * Prevent other threads in the address space from
5522 * creating private pages (i.e., allocating anon slots)
5523 * while we are in the process of loading translations
5524 * to additional pages returned by the underlying
5525 * object.
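 * anon_array_enter() below gives us that serialization on a
 * per-page basis; if an anon slot already exists for a page we
 * simply skip loading a translation for it here.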
5526 */ 5527 if (amp != NULL) { 5528 anon_index = svd->anon_index + page; 5529 anon_array_enter(amp, anon_index, &cookie); 5530 ap = anon_get_ptr(amp->ahp, anon_index); 5531 } 5532 if ((amp == NULL) || (ap == NULL)) { 5533 if (IS_VMODSORT(pp->p_vnode) || 5534 enable_mbit_wa) { 5535 if (rw == S_WRITE) 5536 hat_setmod(pp); 5537 else if (rw != S_OTHER && 5538 !hat_ismod(pp)) 5539 prot &= ~PROT_WRITE; 5540 } 5541 /* 5542 * Skip mapping read ahead pages marked 5543 * for migration, so they will get migrated 5544 * properly on fault 5545 */ 5546 ASSERT(amp == NULL || 5547 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5548 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5549 hat_memload_region(hat, 5550 seg->s_base + diff, 5551 pp, prot, hat_flag, 5552 svd->rcookie); 5553 } 5554 } 5555 if (amp != NULL) 5556 anon_array_exit(&cookie); 5557 } 5558 page_unlock(pp); 5559 } 5560 done: 5561 if (amp != NULL) 5562 ANON_LOCK_EXIT(&->a_rwlock); 5563 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5564 if (pl_alloc_sz) 5565 kmem_free(plp, pl_alloc_sz); 5566 return (0); 5567 } 5568 5569 /* 5570 * This routine is used to start I/O on pages asynchronously. XXX it will 5571 * only create PAGESIZE pages. At fault time they will be relocated into 5572 * larger pages. 5573 */ 5574 static faultcode_t 5575 segvn_faulta(struct seg *seg, caddr_t addr) 5576 { 5577 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5578 int err; 5579 struct anon_map *amp; 5580 vnode_t *vp; 5581 5582 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5583 5584 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5585 if ((amp = svd->amp) != NULL) { 5586 struct anon *ap; 5587 5588 /* 5589 * Reader lock to prevent amp->ahp from being changed. 5590 * This is advisory, it's ok to miss a page, so 5591 * we don't do anon_array_enter lock. 
5592 */ 5593 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5594 if ((ap = anon_get_ptr(amp->ahp, 5595 svd->anon_index + seg_page(seg, addr))) != NULL) { 5596 5597 err = anon_getpage(&ap, NULL, NULL, 5598 0, seg, addr, S_READ, svd->cred); 5599 5600 ANON_LOCK_EXIT(&->a_rwlock); 5601 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5602 if (err) 5603 return (FC_MAKE_ERR(err)); 5604 return (0); 5605 } 5606 ANON_LOCK_EXIT(&->a_rwlock); 5607 } 5608 5609 if (svd->vp == NULL) { 5610 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5611 return (0); /* zfod page - do nothing now */ 5612 } 5613 5614 vp = svd->vp; 5615 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5616 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5617 err = VOP_GETPAGE(vp, 5618 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5619 PAGESIZE, NULL, NULL, 0, seg, addr, 5620 S_OTHER, svd->cred, NULL); 5621 5622 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5623 if (err) 5624 return (FC_MAKE_ERR(err)); 5625 return (0); 5626 } 5627 5628 static int 5629 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5630 { 5631 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5632 struct vpage *cvp, *svp, *evp; 5633 struct vnode *vp; 5634 size_t pgsz; 5635 pgcnt_t pgcnt; 5636 anon_sync_obj_t cookie; 5637 int unload_done = 0; 5638 5639 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5640 5641 if ((svd->maxprot & prot) != prot) 5642 return (EACCES); /* violated maxprot */ 5643 5644 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5645 5646 /* return if prot is the same */ 5647 if (!svd->pageprot && svd->prot == prot) { 5648 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5649 return (0); 5650 } 5651 5652 /* 5653 * Since we change protections we first have to flush the cache. 5654 * This makes sure all the pagelock calls have to recheck 5655 * protections. 5656 */ 5657 if (svd->softlockcnt > 0) { 5658 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5659 5660 /* 5661 * If this is shared segment non 0 softlockcnt 5662 * means locked pages are still in use. 5663 */ 5664 if (svd->type == MAP_SHARED) { 5665 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5666 return (EAGAIN); 5667 } 5668 5669 /* 5670 * Since we do have the segvn writers lock nobody can fill 5671 * the cache with entries belonging to this seg during 5672 * the purge. The flush either succeeds or we still have 5673 * pending I/Os. 
5674 */ 5675 segvn_purge(seg); 5676 if (svd->softlockcnt > 0) { 5677 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5678 return (EAGAIN); 5679 } 5680 } 5681 5682 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5683 ASSERT(svd->amp == NULL); 5684 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5685 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5686 HAT_REGION_TEXT); 5687 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5688 unload_done = 1; 5689 } else if (svd->tr_state == SEGVN_TR_INIT) { 5690 svd->tr_state = SEGVN_TR_OFF; 5691 } else if (svd->tr_state == SEGVN_TR_ON) { 5692 ASSERT(svd->amp != NULL); 5693 segvn_textunrepl(seg, 0); 5694 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5695 unload_done = 1; 5696 } 5697 5698 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5699 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5700 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5701 segvn_inval_trcache(svd->vp); 5702 } 5703 if (seg->s_szc != 0) { 5704 int err; 5705 pgsz = page_get_pagesize(seg->s_szc); 5706 pgcnt = pgsz >> PAGESHIFT; 5707 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5708 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5709 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5710 ASSERT(seg->s_base != addr || seg->s_size != len); 5711 /* 5712 * If we are holding the as lock as a reader then 5713 * we need to return IE_RETRY and let the as 5714 * layer drop and re-acquire the lock as a writer. 5715 */ 5716 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5717 return (IE_RETRY); 5718 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5719 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5720 err = segvn_demote_range(seg, addr, len, 5721 SDR_END, 0); 5722 } else { 5723 uint_t szcvec = map_pgszcvec(seg->s_base, 5724 pgsz, (uintptr_t)seg->s_base, 5725 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5726 err = segvn_demote_range(seg, addr, len, 5727 SDR_END, szcvec); 5728 } 5729 if (err == 0) 5730 return (IE_RETRY); 5731 if (err == ENOMEM) 5732 return (IE_NOMEM); 5733 return (err); 5734 } 5735 } 5736 5737 5738 /* 5739 * If it's a private mapping and we're making it writable then we 5740 * may have to reserve the additional swap space now. If we are 5741 * making writable only a part of the segment then we use its vpage 5742 * array to keep a record of the pages for which we have reserved 5743 * swap. In this case we set the pageswap field in the segment's 5744 * segvn structure to record this. 5745 * 5746 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5747 * removing write permission on the entire segment and we haven't 5748 * modified any pages, we can release the swap space. 5749 */ 5750 if (svd->type == MAP_PRIVATE) { 5751 if (prot & PROT_WRITE) { 5752 if (!(svd->flags & MAP_NORESERVE) && 5753 !(svd->swresv && svd->pageswap == 0)) { 5754 size_t sz = 0; 5755 5756 /* 5757 * Start by determining how much swap 5758 * space is required. 5759 */ 5760 if (addr == seg->s_base && 5761 len == seg->s_size && 5762 svd->pageswap == 0) { 5763 /* The whole segment */ 5764 sz = seg->s_size; 5765 } else { 5766 /* 5767 * Make sure that the vpage array 5768 * exists, and make a note of the 5769 * range of elements corresponding 5770 * to len. 
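 * For example, with 8K pages, if a later request write-enables a
 * 128K range (16 pages) of which 3 pages already have swap reserved
 * (VPP_ISSWAPRES), the loop below counts the remaining 13 pages and
 * sz becomes 13 << PAGESHIFT, i.e. 104K.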
5771 */ 5772 segvn_vpage(seg); 5773 if (svd->vpage == NULL) { 5774 SEGVN_LOCK_EXIT(seg->s_as, 5775 &svd->lock); 5776 return (ENOMEM); 5777 } 5778 svp = &svd->vpage[seg_page(seg, addr)]; 5779 evp = &svd->vpage[seg_page(seg, 5780 addr + len)]; 5781 5782 if (svd->pageswap == 0) { 5783 /* 5784 * This is the first time we've 5785 * asked for a part of this 5786 * segment, so we need to 5787 * reserve everything we've 5788 * been asked for. 5789 */ 5790 sz = len; 5791 } else { 5792 /* 5793 * We have to count the number 5794 * of pages required. 5795 */ 5796 for (cvp = svp; cvp < evp; 5797 cvp++) { 5798 if (!VPP_ISSWAPRES(cvp)) 5799 sz++; 5800 } 5801 sz <<= PAGESHIFT; 5802 } 5803 } 5804 5805 /* Try to reserve the necessary swap. */ 5806 if (anon_resv_zone(sz, 5807 seg->s_as->a_proc->p_zone) == 0) { 5808 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5809 return (IE_NOMEM); 5810 } 5811 5812 /* 5813 * Make a note of how much swap space 5814 * we've reserved. 5815 */ 5816 if (svd->pageswap == 0 && sz == seg->s_size) { 5817 svd->swresv = sz; 5818 } else { 5819 ASSERT(svd->vpage != NULL); 5820 svd->swresv += sz; 5821 svd->pageswap = 1; 5822 for (cvp = svp; cvp < evp; cvp++) { 5823 if (!VPP_ISSWAPRES(cvp)) 5824 VPP_SETSWAPRES(cvp); 5825 } 5826 } 5827 } 5828 } else { 5829 /* 5830 * Swap space is released only if this segment 5831 * does not map anonymous memory, since read faults 5832 * on such segments still need an anon slot to read 5833 * in the data. 5834 */ 5835 if (svd->swresv != 0 && svd->vp != NULL && 5836 svd->amp == NULL && addr == seg->s_base && 5837 len == seg->s_size && svd->pageprot == 0) { 5838 ASSERT(svd->pageswap == 0); 5839 anon_unresv_zone(svd->swresv, 5840 seg->s_as->a_proc->p_zone); 5841 svd->swresv = 0; 5842 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5843 "anon proc:%p %lu %u", seg, 0, 0); 5844 } 5845 } 5846 } 5847 5848 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5849 if (svd->prot == prot) { 5850 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5851 return (0); /* all done */ 5852 } 5853 svd->prot = (uchar_t)prot; 5854 } else if (svd->type == MAP_PRIVATE) { 5855 struct anon *ap = NULL; 5856 page_t *pp; 5857 u_offset_t offset, off; 5858 struct anon_map *amp; 5859 ulong_t anon_idx = 0; 5860 5861 /* 5862 * A vpage structure exists or else the change does not 5863 * involve the entire segment. Establish a vpage structure 5864 * if none is there. Then, for each page in the range, 5865 * adjust its individual permissions. Note that write- 5866 * enabling a MAP_PRIVATE page can affect the claims for 5867 * locked down memory. Overcommitting memory terminates 5868 * the operation. 5869 */ 5870 segvn_vpage(seg); 5871 if (svd->vpage == NULL) { 5872 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5873 return (ENOMEM); 5874 } 5875 svd->pageprot = 1; 5876 if ((amp = svd->amp) != NULL) { 5877 anon_idx = svd->anon_index + seg_page(seg, addr); 5878 ASSERT(seg->s_szc == 0 || 5879 IS_P2ALIGNED(anon_idx, pgcnt)); 5880 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5881 } 5882 5883 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5884 evp = &svd->vpage[seg_page(seg, addr + len)]; 5885 5886 /* 5887 * See Statement at the beginning of segvn_lockop regarding 5888 * the way cowcnts and lckcnts are handled. 
5889 */ 5890 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5891 5892 if (seg->s_szc != 0) { 5893 if (amp != NULL) { 5894 anon_array_enter(amp, anon_idx, 5895 &cookie); 5896 } 5897 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5898 !segvn_claim_pages(seg, svp, offset, 5899 anon_idx, prot)) { 5900 if (amp != NULL) { 5901 anon_array_exit(&cookie); 5902 } 5903 break; 5904 } 5905 if (amp != NULL) { 5906 anon_array_exit(&cookie); 5907 } 5908 anon_idx++; 5909 } else { 5910 if (amp != NULL) { 5911 anon_array_enter(amp, anon_idx, 5912 &cookie); 5913 ap = anon_get_ptr(amp->ahp, anon_idx++); 5914 } 5915 5916 if (VPP_ISPPLOCK(svp) && 5917 VPP_PROT(svp) != prot) { 5918 5919 if (amp == NULL || ap == NULL) { 5920 vp = svd->vp; 5921 off = offset; 5922 } else 5923 swap_xlate(ap, &vp, &off); 5924 if (amp != NULL) 5925 anon_array_exit(&cookie); 5926 5927 if ((pp = page_lookup(vp, off, 5928 SE_SHARED)) == NULL) { 5929 panic("segvn_setprot: no page"); 5930 /*NOTREACHED*/ 5931 } 5932 ASSERT(seg->s_szc == 0); 5933 if ((VPP_PROT(svp) ^ prot) & 5934 PROT_WRITE) { 5935 if (prot & PROT_WRITE) { 5936 if (!page_addclaim( 5937 pp)) { 5938 page_unlock(pp); 5939 break; 5940 } 5941 } else { 5942 if (!page_subclaim( 5943 pp)) { 5944 page_unlock(pp); 5945 break; 5946 } 5947 } 5948 } 5949 page_unlock(pp); 5950 } else if (amp != NULL) 5951 anon_array_exit(&cookie); 5952 } 5953 VPP_SETPROT(svp, prot); 5954 offset += PAGESIZE; 5955 } 5956 if (amp != NULL) 5957 ANON_LOCK_EXIT(&->a_rwlock); 5958 5959 /* 5960 * Did we terminate prematurely? If so, simply unload 5961 * the translations to the things we've updated so far. 5962 */ 5963 if (svp != evp) { 5964 if (unload_done) { 5965 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5966 return (IE_NOMEM); 5967 } 5968 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5969 PAGESIZE; 5970 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5971 if (len != 0) 5972 hat_unload(seg->s_as->a_hat, addr, 5973 len, HAT_UNLOAD); 5974 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5975 return (IE_NOMEM); 5976 } 5977 } else { 5978 segvn_vpage(seg); 5979 if (svd->vpage == NULL) { 5980 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5981 return (ENOMEM); 5982 } 5983 svd->pageprot = 1; 5984 evp = &svd->vpage[seg_page(seg, addr + len)]; 5985 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5986 VPP_SETPROT(svp, prot); 5987 } 5988 } 5989 5990 if (unload_done) { 5991 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5992 return (0); 5993 } 5994 5995 if (((prot & PROT_WRITE) != 0 && 5996 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 5997 (prot & ~PROT_USER) == PROT_NONE) { 5998 /* 5999 * Either private or shared data with write access (in 6000 * which case we need to throw out all former translations 6001 * so that we get the right translations set up on fault 6002 * and we don't allow write access to any copy-on-write pages 6003 * that might be around or to prevent write access to pages 6004 * representing holes in a file), or we don't have permission 6005 * to access the memory at all (in which case we have to 6006 * unload any current translations that might exist). 6007 */ 6008 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 6009 } else { 6010 /* 6011 * A shared mapping or a private mapping in which write 6012 * protection is going to be denied - just change all the 6013 * protections over the range of addresses in question. 6014 * segvn does not support any other attributes other 6015 * than prot so we can use hat_chgattr. 
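 * hat_chgattr() adjusts whatever translations are already loaded in
 * place; unlike the hat_unload() path above it does not force the next
 * access to take a fault.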
6016 */ 6017 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6018 } 6019 6020 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6021 6022 return (0); 6023 } 6024 6025 /* 6026 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 6027 * to determine if the seg is capable of mapping the requested szc. 6028 */ 6029 static int 6030 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6031 { 6032 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6033 struct segvn_data *nsvd; 6034 struct anon_map *amp = svd->amp; 6035 struct seg *nseg; 6036 caddr_t eaddr = addr + len, a; 6037 size_t pgsz = page_get_pagesize(szc); 6038 pgcnt_t pgcnt = page_get_pagecnt(szc); 6039 int err; 6040 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6041 6042 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6043 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6044 6045 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6046 return (0); 6047 } 6048 6049 /* 6050 * addr should always be pgsz aligned but eaddr may be misaligned if 6051 * it's at the end of the segment. 6052 * 6053 * XXX we should assert this condition since as_setpagesize() logic 6054 * guarantees it. 6055 */ 6056 if (!IS_P2ALIGNED(addr, pgsz) || 6057 (!IS_P2ALIGNED(eaddr, pgsz) && 6058 eaddr != seg->s_base + seg->s_size)) { 6059 6060 segvn_setpgsz_align_err++; 6061 return (EINVAL); 6062 } 6063 6064 if (amp != NULL && svd->type == MAP_SHARED) { 6065 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6066 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6067 6068 segvn_setpgsz_anon_align_err++; 6069 return (EINVAL); 6070 } 6071 } 6072 6073 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6074 szc > segvn_maxpgszc) { 6075 return (EINVAL); 6076 } 6077 6078 /* paranoid check */ 6079 if (svd->vp != NULL && 6080 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6081 return (EINVAL); 6082 } 6083 6084 if (seg->s_szc == 0 && svd->vp != NULL && 6085 map_addr_vacalign_check(addr, off)) { 6086 return (EINVAL); 6087 } 6088 6089 /* 6090 * Check that protections are the same within new page 6091 * size boundaries. 6092 */ 6093 if (svd->pageprot) { 6094 for (a = addr; a < eaddr; a += pgsz) { 6095 if ((a + pgsz) > eaddr) { 6096 if (!sameprot(seg, a, eaddr - a)) { 6097 return (EINVAL); 6098 } 6099 } else { 6100 if (!sameprot(seg, a, pgsz)) { 6101 return (EINVAL); 6102 } 6103 } 6104 } 6105 } 6106 6107 /* 6108 * Since we are changing page size we first have to flush 6109 * the cache. This makes sure all the pagelock calls have 6110 * to recheck protections. 6111 */ 6112 if (svd->softlockcnt > 0) { 6113 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6114 6115 /* 6116 * If this is shared segment non 0 softlockcnt 6117 * means locked pages are still in use. 6118 */ 6119 if (svd->type == MAP_SHARED) { 6120 return (EAGAIN); 6121 } 6122 6123 /* 6124 * Since we do have the segvn writers lock nobody can fill 6125 * the cache with entries belonging to this seg during 6126 * the purge. The flush either succeeds or we still have 6127 * pending I/Os. 
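 * In the latter case EAGAIN is returned below and the caller may retry
 * once the outstanding softlocks are dropped.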
6128 */ 6129 segvn_purge(seg); 6130 if (svd->softlockcnt > 0) { 6131 return (EAGAIN); 6132 } 6133 } 6134 6135 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6136 ASSERT(svd->amp == NULL); 6137 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6138 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6139 HAT_REGION_TEXT); 6140 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6141 } else if (svd->tr_state == SEGVN_TR_INIT) { 6142 svd->tr_state = SEGVN_TR_OFF; 6143 } else if (svd->tr_state == SEGVN_TR_ON) { 6144 ASSERT(svd->amp != NULL); 6145 segvn_textunrepl(seg, 1); 6146 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6147 amp = NULL; 6148 } 6149 6150 /* 6151 * Operation for sub range of existing segment. 6152 */ 6153 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6154 if (szc < seg->s_szc) { 6155 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6156 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6157 if (err == 0) { 6158 return (IE_RETRY); 6159 } 6160 if (err == ENOMEM) { 6161 return (IE_NOMEM); 6162 } 6163 return (err); 6164 } 6165 if (addr != seg->s_base) { 6166 nseg = segvn_split_seg(seg, addr); 6167 if (eaddr != (nseg->s_base + nseg->s_size)) { 6168 /* eaddr is szc aligned */ 6169 (void) segvn_split_seg(nseg, eaddr); 6170 } 6171 return (IE_RETRY); 6172 } 6173 if (eaddr != (seg->s_base + seg->s_size)) { 6174 /* eaddr is szc aligned */ 6175 (void) segvn_split_seg(seg, eaddr); 6176 } 6177 return (IE_RETRY); 6178 } 6179 6180 /* 6181 * Break any low level sharing and reset seg->s_szc to 0. 6182 */ 6183 if ((err = segvn_clrszc(seg)) != 0) { 6184 if (err == ENOMEM) { 6185 err = IE_NOMEM; 6186 } 6187 return (err); 6188 } 6189 ASSERT(seg->s_szc == 0); 6190 6191 /* 6192 * If the end of the current segment is not pgsz aligned 6193 * then attempt to concatenate with the next segment. 6194 */ 6195 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6196 nseg = AS_SEGNEXT(seg->s_as, seg); 6197 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6198 return (ENOMEM); 6199 } 6200 if (nseg->s_ops != &segvn_ops) { 6201 return (EINVAL); 6202 } 6203 nsvd = (struct segvn_data *)nseg->s_data; 6204 if (nsvd->softlockcnt > 0) { 6205 /* 6206 * If this is shared segment non 0 softlockcnt 6207 * means locked pages are still in use. 6208 */ 6209 if (nsvd->type == MAP_SHARED) { 6210 return (EAGAIN); 6211 } 6212 segvn_purge(nseg); 6213 if (nsvd->softlockcnt > 0) { 6214 return (EAGAIN); 6215 } 6216 } 6217 err = segvn_clrszc(nseg); 6218 if (err == ENOMEM) { 6219 err = IE_NOMEM; 6220 } 6221 if (err != 0) { 6222 return (err); 6223 } 6224 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6225 err = segvn_concat(seg, nseg, 1); 6226 if (err == -1) { 6227 return (EINVAL); 6228 } 6229 if (err == -2) { 6230 return (IE_NOMEM); 6231 } 6232 return (IE_RETRY); 6233 } 6234 6235 /* 6236 * May need to re-align anon array to 6237 * new szc. 
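 * If anon_index is not aligned to the new large-page count, the anon
 * pointers are copied into a freshly allocated anon_hdr starting at
 * slot 0 so that anon_index can be reset to 0 below.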
6238 */ 6239 if (amp != NULL) { 6240 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6241 struct anon_hdr *nahp; 6242 6243 ASSERT(svd->type == MAP_PRIVATE); 6244 6245 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6246 ASSERT(amp->refcnt == 1); 6247 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6248 if (nahp == NULL) { 6249 ANON_LOCK_EXIT(&->a_rwlock); 6250 return (IE_NOMEM); 6251 } 6252 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6253 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6254 anon_release(nahp, btop(amp->size)); 6255 ANON_LOCK_EXIT(&->a_rwlock); 6256 return (IE_NOMEM); 6257 } 6258 anon_release(amp->ahp, btop(amp->size)); 6259 amp->ahp = nahp; 6260 svd->anon_index = 0; 6261 ANON_LOCK_EXIT(&->a_rwlock); 6262 } 6263 } 6264 if (svd->vp != NULL && szc != 0) { 6265 struct vattr va; 6266 u_offset_t eoffpage = svd->offset; 6267 va.va_mask = AT_SIZE; 6268 eoffpage += seg->s_size; 6269 eoffpage = btopr(eoffpage); 6270 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6271 segvn_setpgsz_getattr_err++; 6272 return (EINVAL); 6273 } 6274 if (btopr(va.va_size) < eoffpage) { 6275 segvn_setpgsz_eof_err++; 6276 return (EINVAL); 6277 } 6278 if (amp != NULL) { 6279 /* 6280 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6281 * don't take anon map lock here to avoid holding it 6282 * across VOP_GETPAGE() calls that may call back into 6283 * segvn for klsutering checks. We don't really need 6284 * anon map lock here since it's a private segment and 6285 * we hold as level lock as writers. 6286 */ 6287 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6288 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6289 seg->s_size, szc, svd->prot, svd->vpage, 6290 svd->cred)) != 0) { 6291 return (EINVAL); 6292 } 6293 } 6294 segvn_setvnode_mpss(svd->vp); 6295 } 6296 6297 if (amp != NULL) { 6298 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6299 if (svd->type == MAP_PRIVATE) { 6300 amp->a_szc = szc; 6301 } else if (szc > amp->a_szc) { 6302 amp->a_szc = szc; 6303 } 6304 ANON_LOCK_EXIT(&->a_rwlock); 6305 } 6306 6307 seg->s_szc = szc; 6308 6309 return (0); 6310 } 6311 6312 static int 6313 segvn_clrszc(struct seg *seg) 6314 { 6315 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6316 struct anon_map *amp = svd->amp; 6317 size_t pgsz; 6318 pgcnt_t pages; 6319 int err = 0; 6320 caddr_t a = seg->s_base; 6321 caddr_t ea = a + seg->s_size; 6322 ulong_t an_idx = svd->anon_index; 6323 vnode_t *vp = svd->vp; 6324 struct vpage *vpage = svd->vpage; 6325 page_t *anon_pl[1 + 1], *pp; 6326 struct anon *ap, *oldap; 6327 uint_t prot = svd->prot, vpprot; 6328 int pageflag = 0; 6329 6330 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6331 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6332 ASSERT(svd->softlockcnt == 0); 6333 6334 if (vp == NULL && amp == NULL) { 6335 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6336 seg->s_szc = 0; 6337 return (0); 6338 } 6339 6340 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6341 ASSERT(svd->amp == NULL); 6342 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6343 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6344 HAT_REGION_TEXT); 6345 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6346 } else if (svd->tr_state == SEGVN_TR_ON) { 6347 ASSERT(svd->amp != NULL); 6348 segvn_textunrepl(seg, 1); 6349 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6350 amp = NULL; 6351 } else { 6352 if (svd->tr_state != SEGVN_TR_OFF) { 6353 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6354 svd->tr_state = SEGVN_TR_OFF; 6355 } 6356 6357 /* 6358 * do HAT_UNLOAD_UNMAP since we are changing the pagesize. 
6359 * unload argument is 0 when we are freeing the segment 6360 * and unload was already done. 6361 */ 6362 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6363 HAT_UNLOAD_UNMAP); 6364 } 6365 6366 if (amp == NULL || svd->type == MAP_SHARED) { 6367 seg->s_szc = 0; 6368 return (0); 6369 } 6370 6371 pgsz = page_get_pagesize(seg->s_szc); 6372 pages = btop(pgsz); 6373 6374 /* 6375 * XXX anon rwlock is not really needed because this is a 6376 * private segment and we are writers. 6377 */ 6378 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6379 6380 for (; a < ea; a += pgsz, an_idx += pages) { 6381 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6382 ASSERT(vpage != NULL || svd->pageprot == 0); 6383 if (vpage != NULL) { 6384 ASSERT(sameprot(seg, a, pgsz)); 6385 prot = VPP_PROT(vpage); 6386 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6387 } 6388 if (seg->s_szc != 0) { 6389 ASSERT(vp == NULL || anon_pages(amp->ahp, 6390 an_idx, pages) == pages); 6391 if ((err = anon_map_demotepages(amp, an_idx, 6392 seg, a, prot, vpage, svd->cred)) != 0) { 6393 goto out; 6394 } 6395 } else { 6396 if (oldap->an_refcnt == 1) { 6397 continue; 6398 } 6399 if ((err = anon_getpage(&oldap, &vpprot, 6400 anon_pl, PAGESIZE, seg, a, S_READ, 6401 svd->cred))) { 6402 goto out; 6403 } 6404 if ((pp = anon_private(&ap, seg, a, prot, 6405 anon_pl[0], pageflag, svd->cred)) == NULL) { 6406 err = ENOMEM; 6407 goto out; 6408 } 6409 anon_decref(oldap); 6410 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6411 ANON_SLEEP); 6412 page_unlock(pp); 6413 } 6414 } 6415 vpage = (vpage == NULL) ? NULL : vpage + pages; 6416 } 6417 6418 amp->a_szc = 0; 6419 seg->s_szc = 0; 6420 out: 6421 ANON_LOCK_EXIT(&->a_rwlock); 6422 return (err); 6423 } 6424 6425 static int 6426 segvn_claim_pages( 6427 struct seg *seg, 6428 struct vpage *svp, 6429 u_offset_t off, 6430 ulong_t anon_idx, 6431 uint_t prot) 6432 { 6433 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6434 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6435 page_t **ppa; 6436 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6437 struct anon_map *amp = svd->amp; 6438 struct vpage *evp = svp + pgcnt; 6439 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6440 + seg->s_base; 6441 struct anon *ap; 6442 struct vnode *vp = svd->vp; 6443 page_t *pp; 6444 pgcnt_t pg_idx, i; 6445 int err = 0; 6446 anoff_t aoff; 6447 int anon = (amp != NULL) ? 
1 : 0; 6448 6449 ASSERT(svd->type == MAP_PRIVATE); 6450 ASSERT(svd->vpage != NULL); 6451 ASSERT(seg->s_szc != 0); 6452 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6453 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6454 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6455 6456 if (VPP_PROT(svp) == prot) 6457 return (1); 6458 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6459 return (1); 6460 6461 ppa = kmem_alloc(ppasize, KM_SLEEP); 6462 if (anon && vp != NULL) { 6463 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6464 anon = 0; 6465 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6466 } 6467 ASSERT(!anon || 6468 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6469 } 6470 6471 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6472 if (!VPP_ISPPLOCK(svp)) 6473 continue; 6474 if (anon) { 6475 ap = anon_get_ptr(amp->ahp, anon_idx); 6476 if (ap == NULL) { 6477 panic("segvn_claim_pages: no anon slot"); 6478 } 6479 swap_xlate(ap, &vp, &aoff); 6480 off = (u_offset_t)aoff; 6481 } 6482 ASSERT(vp != NULL); 6483 if ((pp = page_lookup(vp, 6484 (u_offset_t)off, SE_SHARED)) == NULL) { 6485 panic("segvn_claim_pages: no page"); 6486 } 6487 ppa[pg_idx++] = pp; 6488 off += PAGESIZE; 6489 } 6490 6491 if (ppa[0] == NULL) { 6492 kmem_free(ppa, ppasize); 6493 return (1); 6494 } 6495 6496 ASSERT(pg_idx <= pgcnt); 6497 ppa[pg_idx] = NULL; 6498 6499 6500 /* Find each large page within ppa, and adjust its claim */ 6501 6502 /* Does ppa cover a single large page? */ 6503 if (ppa[0]->p_szc == seg->s_szc) { 6504 if (prot & PROT_WRITE) 6505 err = page_addclaim_pages(ppa); 6506 else 6507 err = page_subclaim_pages(ppa); 6508 } else { 6509 for (i = 0; ppa[i]; i += pgcnt) { 6510 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); 6511 if (prot & PROT_WRITE) 6512 err = page_addclaim_pages(&ppa[i]); 6513 else 6514 err = page_subclaim_pages(&ppa[i]); 6515 if (err == 0) 6516 break; 6517 } 6518 } 6519 6520 for (i = 0; i < pg_idx; i++) { 6521 ASSERT(ppa[i] != NULL); 6522 page_unlock(ppa[i]); 6523 } 6524 6525 kmem_free(ppa, ppasize); 6526 return (err); 6527 } 6528 6529 /* 6530 * Returns right (upper address) segment if split occurred. 6531 * If the address is equal to the beginning or end of its segment it returns 6532 * the current segment. 
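 * The original segment is trimmed to [s_base, addr) and a new segment
 * covering [addr, old end) is allocated; the segvn_data is copied and
 * the vnode/anon/swap bookkeeping is then divided between the two.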
6533 */ 6534 static struct seg * 6535 segvn_split_seg(struct seg *seg, caddr_t addr) 6536 { 6537 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6538 struct seg *nseg; 6539 size_t nsize; 6540 struct segvn_data *nsvd; 6541 6542 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6543 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6544 6545 ASSERT(addr >= seg->s_base); 6546 ASSERT(addr <= seg->s_base + seg->s_size); 6547 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6548 6549 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6550 return (seg); 6551 6552 nsize = seg->s_base + seg->s_size - addr; 6553 seg->s_size = addr - seg->s_base; 6554 nseg = seg_alloc(seg->s_as, addr, nsize); 6555 ASSERT(nseg != NULL); 6556 nseg->s_ops = seg->s_ops; 6557 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6558 nseg->s_data = (void *)nsvd; 6559 nseg->s_szc = seg->s_szc; 6560 *nsvd = *svd; 6561 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6562 nsvd->seg = nseg; 6563 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6564 6565 if (nsvd->vp != NULL) { 6566 VN_HOLD(nsvd->vp); 6567 nsvd->offset = svd->offset + 6568 (uintptr_t)(nseg->s_base - seg->s_base); 6569 if (nsvd->type == MAP_SHARED) 6570 lgrp_shm_policy_init(NULL, nsvd->vp); 6571 } else { 6572 /* 6573 * The offset for an anonymous segment has no significance in 6574 * terms of an offset into a file. If we were to use the above 6575 * calculation instead, the structures read out of 6576 * /proc/<pid>/xmap would be more difficult to decipher since 6577 * it would be unclear whether two seemingly contiguous 6578 * prxmap_t structures represented different segments or a 6579 * single segment that had been split up into multiple prxmap_t 6580 * structures (e.g. if some part of the segment had not yet 6581 * been faulted in). 
6582 */ 6583 nsvd->offset = 0; 6584 } 6585 6586 ASSERT(svd->softlockcnt == 0); 6587 ASSERT(svd->softlockcnt_sbase == 0); 6588 ASSERT(svd->softlockcnt_send == 0); 6589 crhold(svd->cred); 6590 6591 if (svd->vpage != NULL) { 6592 size_t bytes = vpgtob(seg_pages(seg)); 6593 size_t nbytes = vpgtob(seg_pages(nseg)); 6594 struct vpage *ovpage = svd->vpage; 6595 6596 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6597 bcopy(ovpage, svd->vpage, bytes); 6598 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6599 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6600 kmem_free(ovpage, bytes + nbytes); 6601 } 6602 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6603 struct anon_map *oamp = svd->amp, *namp; 6604 struct anon_hdr *nahp; 6605 6606 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6607 ASSERT(oamp->refcnt == 1); 6608 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6609 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6610 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6611 6612 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6613 namp->a_szc = nseg->s_szc; 6614 (void) anon_copy_ptr(oamp->ahp, 6615 svd->anon_index + btop(seg->s_size), 6616 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6617 anon_release(oamp->ahp, btop(oamp->size)); 6618 oamp->ahp = nahp; 6619 oamp->size = seg->s_size; 6620 svd->anon_index = 0; 6621 nsvd->amp = namp; 6622 nsvd->anon_index = 0; 6623 ANON_LOCK_EXIT(&oamp->a_rwlock); 6624 } else if (svd->amp != NULL) { 6625 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6626 ASSERT(svd->amp == nsvd->amp); 6627 ASSERT(seg->s_szc <= svd->amp->a_szc); 6628 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6629 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6630 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6631 svd->amp->refcnt++; 6632 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6633 } 6634 6635 /* 6636 * Split the amount of swap reserved. 6637 */ 6638 if (svd->swresv) { 6639 /* 6640 * For MAP_NORESERVE, only allocate swap reserve for pages 6641 * being used. Other segments get enough to cover whole 6642 * segment. 6643 */ 6644 if (svd->flags & MAP_NORESERVE) { 6645 size_t oswresv; 6646 6647 ASSERT(svd->amp); 6648 oswresv = svd->swresv; 6649 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6650 svd->anon_index, btop(seg->s_size))); 6651 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6652 nsvd->anon_index, btop(nseg->s_size))); 6653 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6654 } else { 6655 if (svd->pageswap) { 6656 svd->swresv = segvn_count_swap_by_vpages(seg); 6657 ASSERT(nsvd->swresv >= svd->swresv); 6658 nsvd->swresv -= svd->swresv; 6659 } else { 6660 ASSERT(svd->swresv == seg->s_size + 6661 nseg->s_size); 6662 svd->swresv = seg->s_size; 6663 nsvd->swresv = nseg->s_size; 6664 } 6665 } 6666 } 6667 6668 return (nseg); 6669 } 6670 6671 /* 6672 * called on memory operations (unmap, setprot, setpagesize) for a subset 6673 * of a large page segment to either demote the memory range (SDR_RANGE) 6674 * or the ends (SDR_END) by addr/len. 6675 * 6676 * returns 0 on success. returns errno, including ENOMEM, on failure. 
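 * Callers generally map a 0 return into IE_RETRY so that the original
 * operation is restarted against the newly demoted segments.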
6677 */ 6678 static int 6679 segvn_demote_range( 6680 struct seg *seg, 6681 caddr_t addr, 6682 size_t len, 6683 int flag, 6684 uint_t szcvec) 6685 { 6686 caddr_t eaddr = addr + len; 6687 caddr_t lpgaddr, lpgeaddr; 6688 struct seg *nseg; 6689 struct seg *badseg1 = NULL; 6690 struct seg *badseg2 = NULL; 6691 size_t pgsz; 6692 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6693 int err; 6694 uint_t szc = seg->s_szc; 6695 uint_t tszcvec; 6696 6697 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6698 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6699 ASSERT(szc != 0); 6700 pgsz = page_get_pagesize(szc); 6701 ASSERT(seg->s_base != addr || seg->s_size != len); 6702 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6703 ASSERT(svd->softlockcnt == 0); 6704 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6705 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6706 6707 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6708 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6709 if (flag == SDR_RANGE) { 6710 /* demote entire range */ 6711 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6712 (void) segvn_split_seg(nseg, lpgeaddr); 6713 ASSERT(badseg1->s_base == lpgaddr); 6714 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6715 } else if (addr != lpgaddr) { 6716 ASSERT(flag == SDR_END); 6717 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6718 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6719 eaddr < lpgaddr + 2 * pgsz) { 6720 (void) segvn_split_seg(nseg, lpgeaddr); 6721 ASSERT(badseg1->s_base == lpgaddr); 6722 ASSERT(badseg1->s_size == 2 * pgsz); 6723 } else { 6724 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6725 ASSERT(badseg1->s_base == lpgaddr); 6726 ASSERT(badseg1->s_size == pgsz); 6727 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6728 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6729 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6730 badseg2 = nseg; 6731 (void) segvn_split_seg(nseg, lpgeaddr); 6732 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6733 ASSERT(badseg2->s_size == pgsz); 6734 } 6735 } 6736 } else { 6737 ASSERT(flag == SDR_END); 6738 ASSERT(eaddr < lpgeaddr); 6739 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6740 (void) segvn_split_seg(nseg, lpgeaddr); 6741 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6742 ASSERT(badseg1->s_size == pgsz); 6743 } 6744 6745 ASSERT(badseg1 != NULL); 6746 ASSERT(badseg1->s_szc == szc); 6747 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6748 badseg1->s_size == 2 * pgsz); 6749 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6750 ASSERT(badseg1->s_size == pgsz || 6751 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6752 if (err = segvn_clrszc(badseg1)) { 6753 return (err); 6754 } 6755 ASSERT(badseg1->s_szc == 0); 6756 6757 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6758 uint_t tszc = highbit(tszcvec) - 1; 6759 caddr_t ta = MAX(addr, badseg1->s_base); 6760 caddr_t te; 6761 size_t tpgsz = page_get_pagesize(tszc); 6762 6763 ASSERT(svd->type == MAP_SHARED); 6764 ASSERT(flag == SDR_END); 6765 ASSERT(tszc < szc && tszc > 0); 6766 6767 if (eaddr > badseg1->s_base + badseg1->s_size) { 6768 te = badseg1->s_base + badseg1->s_size; 6769 } else { 6770 te = eaddr; 6771 } 6772 6773 ASSERT(ta <= te); 6774 badseg1->s_szc = tszc; 6775 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6776 if (badseg2 != NULL) { 6777 err = segvn_demote_range(badseg1, ta, te - ta, 6778 SDR_END, tszcvec); 6779 if (err != 0) { 6780 return (err); 6781 } 6782 } else { 6783 
return (segvn_demote_range(badseg1, ta, 6784 te - ta, SDR_END, tszcvec)); 6785 } 6786 } 6787 } 6788 6789 if (badseg2 == NULL) 6790 return (0); 6791 ASSERT(badseg2->s_szc == szc); 6792 ASSERT(badseg2->s_size == pgsz); 6793 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6794 if (err = segvn_clrszc(badseg2)) { 6795 return (err); 6796 } 6797 ASSERT(badseg2->s_szc == 0); 6798 6799 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6800 uint_t tszc = highbit(tszcvec) - 1; 6801 size_t tpgsz = page_get_pagesize(tszc); 6802 6803 ASSERT(svd->type == MAP_SHARED); 6804 ASSERT(flag == SDR_END); 6805 ASSERT(tszc < szc && tszc > 0); 6806 ASSERT(badseg2->s_base > addr); 6807 ASSERT(eaddr > badseg2->s_base); 6808 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6809 6810 badseg2->s_szc = tszc; 6811 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6812 return (segvn_demote_range(badseg2, badseg2->s_base, 6813 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6814 } 6815 } 6816 6817 return (0); 6818 } 6819 6820 static int 6821 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6822 { 6823 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6824 struct vpage *vp, *evp; 6825 6826 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6827 6828 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6829 /* 6830 * If segment protection can be used, simply check against them. 6831 */ 6832 if (svd->pageprot == 0) { 6833 int err; 6834 6835 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6836 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6837 return (err); 6838 } 6839 6840 /* 6841 * Have to check down to the vpage level. 6842 */ 6843 evp = &svd->vpage[seg_page(seg, addr + len)]; 6844 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6845 if ((VPP_PROT(vp) & prot) != prot) { 6846 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6847 return (EACCES); 6848 } 6849 } 6850 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6851 return (0); 6852 } 6853 6854 static int 6855 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6856 { 6857 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6858 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6859 6860 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6861 6862 if (pgno != 0) { 6863 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6864 if (svd->pageprot == 0) { 6865 do { 6866 protv[--pgno] = svd->prot; 6867 } while (pgno != 0); 6868 } else { 6869 size_t pgoff = seg_page(seg, addr); 6870 6871 do { 6872 pgno--; 6873 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6874 } while (pgno != 0); 6875 } 6876 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6877 } 6878 return (0); 6879 } 6880 6881 static u_offset_t 6882 segvn_getoffset(struct seg *seg, caddr_t addr) 6883 { 6884 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6885 6886 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6887 6888 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6889 } 6890 6891 /*ARGSUSED*/ 6892 static int 6893 segvn_gettype(struct seg *seg, caddr_t addr) 6894 { 6895 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6896 6897 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6898 6899 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6900 MAP_INITDATA))); 6901 } 6902 6903 /*ARGSUSED*/ 6904 static int 6905 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6906 { 6907 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6908 6909 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6910 6911 *vpp = svd->vp; 6912 return (0); 6913 } 6914 6915 /* 6916 * Check to see if it makes sense to do kluster/read ahead to 6917 * addr + delta relative to the mapping at addr. We assume here 6918 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6919 * 6920 * For segvn, we currently "approve" of the action if we are 6921 * still in the segment and it maps from the same vp/off, 6922 * or if the advice stored in segvn_data or vpages allows it. 6923 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6924 */ 6925 static int 6926 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6927 { 6928 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6929 struct anon *oap, *ap; 6930 ssize_t pd; 6931 size_t page; 6932 struct vnode *vp1, *vp2; 6933 u_offset_t off1, off2; 6934 struct anon_map *amp; 6935 6936 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6937 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6938 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6939 6940 if (addr + delta < seg->s_base || 6941 addr + delta >= (seg->s_base + seg->s_size)) 6942 return (-1); /* exceeded segment bounds */ 6943 6944 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6945 page = seg_page(seg, addr); 6946 6947 /* 6948 * Check to see if either of the pages addr or addr + delta 6949 * have advice set that prevents klustering (if MADV_RANDOM advice 6950 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6951 * is negative). 6952 */ 6953 if (svd->advice == MADV_RANDOM || 6954 svd->advice == MADV_SEQUENTIAL && delta < 0) 6955 return (-1); 6956 else if (svd->pageadvice && svd->vpage) { 6957 struct vpage *bvpp, *evpp; 6958 6959 bvpp = &svd->vpage[page]; 6960 evpp = &svd->vpage[page + pd]; 6961 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6962 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6963 return (-1); 6964 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6965 VPP_ADVICE(evpp) == MADV_RANDOM) 6966 return (-1); 6967 } 6968 6969 if (svd->type == MAP_SHARED) 6970 return (0); /* shared mapping - all ok */ 6971 6972 if ((amp = svd->amp) == NULL) 6973 return (0); /* off original vnode */ 6974 6975 page += svd->anon_index; 6976 6977 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6978 6979 oap = anon_get_ptr(amp->ahp, page); 6980 ap = anon_get_ptr(amp->ahp, page + pd); 6981 6982 ANON_LOCK_EXIT(&->a_rwlock); 6983 6984 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 6985 return (-1); /* one with and one without an anon */ 6986 } 6987 6988 if (oap == NULL) { /* implies that ap == NULL */ 6989 return (0); /* off original vnode */ 6990 } 6991 6992 /* 6993 * Now we know we have two anon pointers - check to 6994 * see if they happen to be properly allocated. 6995 */ 6996 6997 /* 6998 * XXX We cheat here and don't lock the anon slots. We can't because 6999 * we may have been called from the anon layer which might already 7000 * have locked them. We are holding a refcnt on the slots so they 7001 * can't disappear. The worst that will happen is we'll get the wrong 7002 * names (vp, off) for the slots and make a poor klustering decision. 7003 */ 7004 swap_xlate(ap, &vp1, &off1); 7005 swap_xlate(oap, &vp2, &off2); 7006 7007 7008 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 7009 return (-1); 7010 return (0); 7011 } 7012 7013 /* 7014 * Swap the pages of seg out to secondary storage, returning the 7015 * number of bytes of storage freed. 
7016 * 7017 * The basic idea is first to unload all translations and then to call 7018 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 7019 * swap device. Pages to which other segments have mappings will remain 7020 * mapped and won't be swapped. Our caller (as_swapout) has already 7021 * performed the unloading step. 7022 * 7023 * The value returned is intended to correlate well with the process's 7024 * memory requirements. However, there are some caveats: 7025 * 1) When given a shared segment as argument, this routine will 7026 * only succeed in swapping out pages for the last sharer of the 7027 * segment. (Previous callers will only have decremented mapping 7028 * reference counts.) 7029 * 2) We assume that the hat layer maintains a large enough translation 7030 * cache to capture process reference patterns. 7031 */ 7032 static size_t 7033 segvn_swapout(struct seg *seg) 7034 { 7035 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7036 struct anon_map *amp; 7037 pgcnt_t pgcnt = 0; 7038 pgcnt_t npages; 7039 pgcnt_t page; 7040 ulong_t anon_index; 7041 7042 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7043 7044 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7045 /* 7046 * Find pages unmapped by our caller and force them 7047 * out to the virtual swap device. 7048 */ 7049 if ((amp = svd->amp) != NULL) 7050 anon_index = svd->anon_index; 7051 npages = seg->s_size >> PAGESHIFT; 7052 for (page = 0; page < npages; page++) { 7053 page_t *pp; 7054 struct anon *ap; 7055 struct vnode *vp; 7056 u_offset_t off; 7057 anon_sync_obj_t cookie; 7058 7059 /* 7060 * Obtain <vp, off> pair for the page, then look it up. 7061 * 7062 * Note that this code is willing to consider regular 7063 * pages as well as anon pages. Is this appropriate here? 7064 */ 7065 ap = NULL; 7066 if (amp != NULL) { 7067 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7068 if (anon_array_try_enter(amp, anon_index + page, 7069 &cookie)) { 7070 ANON_LOCK_EXIT(&->a_rwlock); 7071 continue; 7072 } 7073 ap = anon_get_ptr(amp->ahp, anon_index + page); 7074 if (ap != NULL) { 7075 swap_xlate(ap, &vp, &off); 7076 } else { 7077 vp = svd->vp; 7078 off = svd->offset + ptob(page); 7079 } 7080 anon_array_exit(&cookie); 7081 ANON_LOCK_EXIT(&->a_rwlock); 7082 } else { 7083 vp = svd->vp; 7084 off = svd->offset + ptob(page); 7085 } 7086 if (vp == NULL) { /* untouched zfod page */ 7087 ASSERT(ap == NULL); 7088 continue; 7089 } 7090 7091 pp = page_lookup_nowait(vp, off, SE_SHARED); 7092 if (pp == NULL) 7093 continue; 7094 7095 7096 /* 7097 * Examine the page to see whether it can be tossed out, 7098 * keeping track of how many we've found. 7099 */ 7100 if (!page_tryupgrade(pp)) { 7101 /* 7102 * If the page has an i/o lock and no mappings, 7103 * it's very likely that the page is being 7104 * written out as a result of klustering. 7105 * Assume this is so and take credit for it here. 7106 */ 7107 if (!page_io_trylock(pp)) { 7108 if (!hat_page_is_mapped(pp)) 7109 pgcnt++; 7110 } else { 7111 page_io_unlock(pp); 7112 } 7113 page_unlock(pp); 7114 continue; 7115 } 7116 ASSERT(!page_iolock_assert(pp)); 7117 7118 7119 /* 7120 * Skip if page is locked or has mappings. 7121 * We don't need the page_struct_lock to look at lckcnt 7122 * and cowcnt because the page is exclusive locked. 7123 */ 7124 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7125 hat_page_is_mapped(pp)) { 7126 page_unlock(pp); 7127 continue; 7128 } 7129 7130 /* 7131 * dispose skips large pages so try to demote first. 
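 * page_try_demote_pages() breaks the large page into PAGESIZE pages;
 * if the demotion fails the page is simply left alone and skipped.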
7132 */ 7133 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7134 page_unlock(pp); 7135 /* 7136 * XXX should skip the remaining page_t's of this 7137 * large page. 7138 */ 7139 continue; 7140 } 7141 7142 ASSERT(pp->p_szc == 0); 7143 7144 /* 7145 * No longer mapped -- we can toss it out. How 7146 * we do so depends on whether or not it's dirty. 7147 */ 7148 if (hat_ismod(pp) && pp->p_vnode) { 7149 /* 7150 * We must clean the page before it can be 7151 * freed. Setting B_FREE will cause pvn_done 7152 * to free the page when the i/o completes. 7153 * XXX: This also causes it to be accounted 7154 * as a pageout instead of a swap: need 7155 * B_SWAPOUT bit to use instead of B_FREE. 7156 * 7157 * Hold the vnode before releasing the page lock 7158 * to prevent it from being freed and re-used by 7159 * some other thread. 7160 */ 7161 VN_HOLD(vp); 7162 page_unlock(pp); 7163 7164 /* 7165 * Queue all i/o requests for the pageout thread 7166 * to avoid saturating the pageout devices. 7167 */ 7168 if (!queue_io_request(vp, off)) 7169 VN_RELE(vp); 7170 } else { 7171 /* 7172 * The page was clean, free it. 7173 * 7174 * XXX: Can we ever encounter modified pages 7175 * with no associated vnode here? 7176 */ 7177 ASSERT(pp->p_vnode != NULL); 7178 /*LINTED: constant in conditional context*/ 7179 VN_DISPOSE(pp, B_FREE, 0, kcred); 7180 } 7181 7182 /* 7183 * Credit now even if i/o is in progress. 7184 */ 7185 pgcnt++; 7186 } 7187 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7188 7189 /* 7190 * Wakeup pageout to initiate i/o on all queued requests. 7191 */ 7192 cv_signal_pageout(); 7193 return (ptob(pgcnt)); 7194 } 7195 7196 /* 7197 * Synchronize primary storage cache with real object in virtual memory. 7198 * 7199 * XXX - Anonymous pages should not be sync'ed out at all. 7200 */ 7201 static int 7202 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7203 { 7204 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7205 struct vpage *vpp; 7206 page_t *pp; 7207 u_offset_t offset; 7208 struct vnode *vp; 7209 u_offset_t off; 7210 caddr_t eaddr; 7211 int bflags; 7212 int err = 0; 7213 int segtype; 7214 int pageprot; 7215 int prot; 7216 ulong_t anon_index; 7217 struct anon_map *amp; 7218 struct anon *ap; 7219 anon_sync_obj_t cookie; 7220 7221 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7222 7223 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7224 7225 if (svd->softlockcnt > 0) { 7226 /* 7227 * If this is shared segment non 0 softlockcnt 7228 * means locked pages are still in use. 7229 */ 7230 if (svd->type == MAP_SHARED) { 7231 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7232 return (EAGAIN); 7233 } 7234 7235 /* 7236 * flush all pages from seg cache 7237 * otherwise we may deadlock in swap_putpage 7238 * for B_INVAL page (4175402). 7239 * 7240 * Even if we grab segvn WRITER's lock 7241 * here, there might be another thread which could've 7242 * successfully performed lookup/insert just before 7243 * we acquired the lock here. So, grabbing either 7244 * lock here is of not much use. Until we devise 7245 * a strategy at upper layers to solve the 7246 * synchronization issues completely, we expect 7247 * applications to handle this appropriately. 7248 */ 7249 segvn_purge(seg); 7250 if (svd->softlockcnt > 0) { 7251 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7252 return (EAGAIN); 7253 } 7254 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7255 svd->amp->a_softlockcnt > 0) { 7256 /* 7257 * Try to purge this amp's entries from pcache. 
It will 7258 * succeed only if other segments that share the amp have no 7259 * outstanding softlock's. 7260 */ 7261 segvn_purge(seg); 7262 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7263 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7264 return (EAGAIN); 7265 } 7266 } 7267 7268 vpp = svd->vpage; 7269 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7270 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7271 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7272 7273 if (attr) { 7274 pageprot = attr & ~(SHARED|PRIVATE); 7275 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7276 7277 /* 7278 * We are done if the segment types don't match 7279 * or if we have segment level protections and 7280 * they don't match. 7281 */ 7282 if (svd->type != segtype) { 7283 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7284 return (0); 7285 } 7286 if (vpp == NULL) { 7287 if (svd->prot != pageprot) { 7288 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7289 return (0); 7290 } 7291 prot = svd->prot; 7292 } else 7293 vpp = &svd->vpage[seg_page(seg, addr)]; 7294 7295 } else if (svd->vp && svd->amp == NULL && 7296 (flags & MS_INVALIDATE) == 0) { 7297 7298 /* 7299 * No attributes, no anonymous pages and MS_INVALIDATE flag 7300 * is not on, just use one big request. 7301 */ 7302 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7303 bflags, svd->cred, NULL); 7304 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7305 return (err); 7306 } 7307 7308 if ((amp = svd->amp) != NULL) 7309 anon_index = svd->anon_index + seg_page(seg, addr); 7310 7311 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7312 ap = NULL; 7313 if (amp != NULL) { 7314 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7315 anon_array_enter(amp, anon_index, &cookie); 7316 ap = anon_get_ptr(amp->ahp, anon_index++); 7317 if (ap != NULL) { 7318 swap_xlate(ap, &vp, &off); 7319 } else { 7320 vp = svd->vp; 7321 off = offset; 7322 } 7323 anon_array_exit(&cookie); 7324 ANON_LOCK_EXIT(&->a_rwlock); 7325 } else { 7326 vp = svd->vp; 7327 off = offset; 7328 } 7329 offset += PAGESIZE; 7330 7331 if (vp == NULL) /* untouched zfod page */ 7332 continue; 7333 7334 if (attr) { 7335 if (vpp) { 7336 prot = VPP_PROT(vpp); 7337 vpp++; 7338 } 7339 if (prot != pageprot) { 7340 continue; 7341 } 7342 } 7343 7344 /* 7345 * See if any of these pages are locked -- if so, then we 7346 * will have to truncate an invalidate request at the first 7347 * locked one. We don't need the page_struct_lock to test 7348 * as this is only advisory; even if we acquire it someone 7349 * might race in and lock the page after we unlock and before 7350 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7351 */ 7352 if (flags & MS_INVALIDATE) { 7353 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7354 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7355 page_unlock(pp); 7356 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7357 return (EBUSY); 7358 } 7359 if (ap != NULL && pp->p_szc != 0 && 7360 page_tryupgrade(pp)) { 7361 if (pp->p_lckcnt == 0 && 7362 pp->p_cowcnt == 0) { 7363 /* 7364 * swapfs VN_DISPOSE() won't 7365 * invalidate large pages. 7366 * Attempt to demote. 7367 * XXX can't help it if it 7368 * fails. But for swapfs 7369 * pages it is no big deal. 7370 */ 7371 (void) page_try_demote_pages( 7372 pp); 7373 } 7374 } 7375 page_unlock(pp); 7376 } 7377 } else if (svd->type == MAP_SHARED && amp != NULL) { 7378 /* 7379 * Avoid writing out to disk ISM's large pages 7380 * because segspt_free_pages() relies on NULL an_pvp 7381 * of anon slots of such pages. 
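 * Pushing such a page to swap would assign backing store (set an_pvp)
 * and break that assumption.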
7382 */ 7383 7384 ASSERT(svd->vp == NULL); 7385 /* 7386 * swapfs uses page_lookup_nowait if not freeing or 7387 * invalidating and skips a page if 7388 * page_lookup_nowait returns NULL. 7389 */ 7390 pp = page_lookup_nowait(vp, off, SE_SHARED); 7391 if (pp == NULL) { 7392 continue; 7393 } 7394 if (pp->p_szc != 0) { 7395 page_unlock(pp); 7396 continue; 7397 } 7398 7399 /* 7400 * Note ISM pages are created large so (vp, off)'s 7401 * page cannot suddenly become large after we unlock 7402 * pp. 7403 */ 7404 page_unlock(pp); 7405 } 7406 /* 7407 * XXX - Should ultimately try to kluster 7408 * calls to VOP_PUTPAGE() for performance. 7409 */ 7410 VN_HOLD(vp); 7411 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7412 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7413 svd->cred, NULL); 7414 7415 VN_RELE(vp); 7416 if (err) 7417 break; 7418 } 7419 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7420 return (err); 7421 } 7422 7423 /* 7424 * Determine if we have data corresponding to pages in the 7425 * primary storage virtual memory cache (i.e., "in core"). 7426 */ 7427 static size_t 7428 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7429 { 7430 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7431 struct vnode *vp, *avp; 7432 u_offset_t offset, aoffset; 7433 size_t p, ep; 7434 int ret; 7435 struct vpage *vpp; 7436 page_t *pp; 7437 uint_t start; 7438 struct anon_map *amp; /* XXX - for locknest */ 7439 struct anon *ap; 7440 uint_t attr; 7441 anon_sync_obj_t cookie; 7442 7443 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7444 7445 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7446 if (svd->amp == NULL && svd->vp == NULL) { 7447 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7448 bzero(vec, btopr(len)); 7449 return (len); /* no anonymous pages created yet */ 7450 } 7451 7452 p = seg_page(seg, addr); 7453 ep = seg_page(seg, addr + len); 7454 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7455 7456 amp = svd->amp; 7457 for (; p < ep; p++, addr += PAGESIZE) { 7458 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7459 ret = start; 7460 ap = NULL; 7461 avp = NULL; 7462 /* Grab the vnode/offset for the anon slot */ 7463 if (amp != NULL) { 7464 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7465 anon_array_enter(amp, svd->anon_index + p, &cookie); 7466 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7467 if (ap != NULL) { 7468 swap_xlate(ap, &avp, &aoffset); 7469 } 7470 anon_array_exit(&cookie); 7471 ANON_LOCK_EXIT(&->a_rwlock); 7472 } 7473 if ((avp != NULL) && page_exists(avp, aoffset)) { 7474 /* A page exists for the anon slot */ 7475 ret |= SEG_PAGE_INCORE; 7476 7477 /* 7478 * If page is mapped and writable 7479 */ 7480 attr = (uint_t)0; 7481 if ((hat_getattr(seg->s_as->a_hat, addr, 7482 &attr) != -1) && (attr & PROT_WRITE)) { 7483 ret |= SEG_PAGE_ANON; 7484 } 7485 /* 7486 * Don't get page_struct lock for lckcnt and cowcnt, 7487 * since this is purely advisory. 7488 */ 7489 if ((pp = page_lookup_nowait(avp, aoffset, 7490 SE_SHARED)) != NULL) { 7491 if (pp->p_lckcnt) 7492 ret |= SEG_PAGE_SOFTLOCK; 7493 if (pp->p_cowcnt) 7494 ret |= SEG_PAGE_HASCOW; 7495 page_unlock(pp); 7496 } 7497 } 7498 7499 /* Gather vnode statistics */ 7500 vp = svd->vp; 7501 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7502 7503 if (vp != NULL) { 7504 /* 7505 * Try to obtain a "shared" lock on the page 7506 * without blocking. If this fails, determine 7507 * if the page is in memory. 
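 * page_lookup_nowait() returning NULL does not prove the page is
 * absent (it may just be locked incompatibly), hence the separate
 * page_exists() check below.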
7508 */ 7509 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7510 if ((pp == NULL) && (page_exists(vp, offset))) { 7511 /* Page is incore, and is named */ 7512 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7513 } 7514 /* 7515 * Don't get page_struct lock for lckcnt and cowcnt, 7516 * since this is purely advisory. 7517 */ 7518 if (pp != NULL) { 7519 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7520 if (pp->p_lckcnt) 7521 ret |= SEG_PAGE_SOFTLOCK; 7522 if (pp->p_cowcnt) 7523 ret |= SEG_PAGE_HASCOW; 7524 page_unlock(pp); 7525 } 7526 } 7527 7528 /* Gather virtual page information */ 7529 if (vpp) { 7530 if (VPP_ISPPLOCK(vpp)) 7531 ret |= SEG_PAGE_LOCKED; 7532 vpp++; 7533 } 7534 7535 *vec++ = (char)ret; 7536 } 7537 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7538 return (len); 7539 } 7540 7541 /* 7542 * Statement for p_cowcnts/p_lckcnts. 7543 * 7544 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7545 * irrespective of the following factors or anything else: 7546 * 7547 * (1) anon slots are populated or not 7548 * (2) cow is broken or not 7549 * (3) refcnt on ap is 1 or greater than 1 7550 * 7551 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7552 * and munlock. 7553 * 7554 * 7555 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7556 * 7557 * if vpage has PROT_WRITE 7558 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7559 * else 7560 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7561 * 7562 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7563 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7564 * 7565 * We may also break COW if softlocking on read access in the physio case. 7566 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7567 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7568 * vpage doesn't have PROT_WRITE. 7569 * 7570 * 7571 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7572 * 7573 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7574 * increment p_lckcnt by calling page_subclaim() which takes care of 7575 * availrmem accounting and p_lckcnt overflow. 7576 * 7577 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7578 * increment p_cowcnt by calling page_addclaim() which takes care of 7579 * availrmem availability and p_cowcnt overflow. 7580 */ 7581 7582 /* 7583 * Lock down (or unlock) pages mapped by this segment. 7584 * 7585 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7586 * At fault time they will be relocated into larger pages. 
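 * Example of the claim accounting described in the Statement above:
 * MC_LOCK of a writable MAP_PRIVATE page calls page_pp_lock() with
 * claim set and is charged to p_cowcnt; if the mapping later loses
 * PROT_WRITE, segvn_setprot() shifts that claim to p_lckcnt through
 * page_subclaim().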
7587 */ 7588 static int 7589 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7590 int attr, int op, ulong_t *lockmap, size_t pos) 7591 { 7592 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7593 struct vpage *vpp; 7594 struct vpage *evp; 7595 page_t *pp; 7596 u_offset_t offset; 7597 u_offset_t off; 7598 int segtype; 7599 int pageprot; 7600 int claim; 7601 struct vnode *vp; 7602 ulong_t anon_index; 7603 struct anon_map *amp; 7604 struct anon *ap; 7605 struct vattr va; 7606 anon_sync_obj_t cookie; 7607 struct kshmid *sp = NULL; 7608 struct proc *p = curproc; 7609 kproject_t *proj = NULL; 7610 int chargeproc = 1; 7611 size_t locked_bytes = 0; 7612 size_t unlocked_bytes = 0; 7613 int err = 0; 7614 7615 /* 7616 * Hold write lock on address space because may split or concatenate 7617 * segments 7618 */ 7619 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7620 7621 /* 7622 * If this is a shm, use shm's project and zone, else use 7623 * project and zone of calling process 7624 */ 7625 7626 /* Determine if this segment backs a sysV shm */ 7627 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7628 ASSERT(svd->type == MAP_SHARED); 7629 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7630 sp = svd->amp->a_sp; 7631 proj = sp->shm_perm.ipc_proj; 7632 chargeproc = 0; 7633 } 7634 7635 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7636 if (attr) { 7637 pageprot = attr & ~(SHARED|PRIVATE); 7638 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7639 7640 /* 7641 * We are done if the segment types don't match 7642 * or if we have segment level protections and 7643 * they don't match. 7644 */ 7645 if (svd->type != segtype) { 7646 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7647 return (0); 7648 } 7649 if (svd->pageprot == 0 && svd->prot != pageprot) { 7650 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7651 return (0); 7652 } 7653 } 7654 7655 if (op == MC_LOCK) { 7656 if (svd->tr_state == SEGVN_TR_INIT) { 7657 svd->tr_state = SEGVN_TR_OFF; 7658 } else if (svd->tr_state == SEGVN_TR_ON) { 7659 ASSERT(svd->amp != NULL); 7660 segvn_textunrepl(seg, 0); 7661 ASSERT(svd->amp == NULL && 7662 svd->tr_state == SEGVN_TR_OFF); 7663 } 7664 } 7665 7666 /* 7667 * If we're locking, then we must create a vpage structure if 7668 * none exists. If we're unlocking, then check to see if there 7669 * is a vpage -- if not, then we could not have locked anything. 7670 */ 7671 7672 if ((vpp = svd->vpage) == NULL) { 7673 if (op == MC_LOCK) { 7674 segvn_vpage(seg); 7675 if (svd->vpage == NULL) { 7676 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7677 return (ENOMEM); 7678 } 7679 } else { 7680 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7681 return (0); 7682 } 7683 } 7684 7685 /* 7686 * The anonymous data vector (i.e., previously 7687 * unreferenced mapping to swap space) can be allocated 7688 * by lazily testing for its existence. 
7689 */ 7690 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7691 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7692 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7693 svd->amp->a_szc = seg->s_szc; 7694 } 7695 7696 if ((amp = svd->amp) != NULL) { 7697 anon_index = svd->anon_index + seg_page(seg, addr); 7698 } 7699 7700 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7701 evp = &svd->vpage[seg_page(seg, addr + len)]; 7702 7703 if (sp != NULL) 7704 mutex_enter(&sp->shm_mlock); 7705 7706 /* determine number of unlocked bytes in range for lock operation */ 7707 if (op == MC_LOCK) { 7708 7709 if (sp == NULL) { 7710 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7711 vpp++) { 7712 if (!VPP_ISPPLOCK(vpp)) 7713 unlocked_bytes += PAGESIZE; 7714 } 7715 } else { 7716 ulong_t i_idx, i_edx; 7717 anon_sync_obj_t i_cookie; 7718 struct anon *i_ap; 7719 struct vnode *i_vp; 7720 u_offset_t i_off; 7721 7722 /* Only count sysV pages once for locked memory */ 7723 i_edx = svd->anon_index + seg_page(seg, addr + len); 7724 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7725 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7726 anon_array_enter(amp, i_idx, &i_cookie); 7727 i_ap = anon_get_ptr(amp->ahp, i_idx); 7728 if (i_ap == NULL) { 7729 unlocked_bytes += PAGESIZE; 7730 anon_array_exit(&i_cookie); 7731 continue; 7732 } 7733 swap_xlate(i_ap, &i_vp, &i_off); 7734 anon_array_exit(&i_cookie); 7735 pp = page_lookup(i_vp, i_off, SE_SHARED); 7736 if (pp == NULL) { 7737 unlocked_bytes += PAGESIZE; 7738 continue; 7739 } else if (pp->p_lckcnt == 0) 7740 unlocked_bytes += PAGESIZE; 7741 page_unlock(pp); 7742 } 7743 ANON_LOCK_EXIT(&->a_rwlock); 7744 } 7745 7746 mutex_enter(&p->p_lock); 7747 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7748 chargeproc); 7749 mutex_exit(&p->p_lock); 7750 7751 if (err) { 7752 if (sp != NULL) 7753 mutex_exit(&sp->shm_mlock); 7754 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7755 return (err); 7756 } 7757 } 7758 /* 7759 * Loop over all pages in the range. Process if we're locking and 7760 * page has not already been locked in this mapping; or if we're 7761 * unlocking and the page has been locked. 7762 */ 7763 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7764 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7765 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7766 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7767 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7768 7769 if (amp != NULL) 7770 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7771 /* 7772 * If this isn't a MAP_NORESERVE segment and 7773 * we're locking, allocate anon slots if they 7774 * don't exist. The page is brought in later on. 7775 */ 7776 if (op == MC_LOCK && svd->vp == NULL && 7777 ((svd->flags & MAP_NORESERVE) == 0) && 7778 amp != NULL && 7779 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7780 == NULL)) { 7781 anon_array_enter(amp, anon_index, &cookie); 7782 7783 if ((ap = anon_get_ptr(amp->ahp, 7784 anon_index)) == NULL) { 7785 pp = anon_zero(seg, addr, &ap, 7786 svd->cred); 7787 if (pp == NULL) { 7788 anon_array_exit(&cookie); 7789 ANON_LOCK_EXIT(&->a_rwlock); 7790 err = ENOMEM; 7791 goto out; 7792 } 7793 ASSERT(anon_get_ptr(amp->ahp, 7794 anon_index) == NULL); 7795 (void) anon_set_ptr(amp->ahp, 7796 anon_index, ap, ANON_SLEEP); 7797 page_unlock(pp); 7798 } 7799 anon_array_exit(&cookie); 7800 } 7801 7802 /* 7803 * Get name for page, accounting for 7804 * existence of private copy. 
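 * That is, use the <vp, off> pair from swap_xlate() when an anon slot
 * (private copy) exists, otherwise fall back to the segment's vnode
 * and offset.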
7805 */ 7806 ap = NULL; 7807 if (amp != NULL) { 7808 anon_array_enter(amp, anon_index, &cookie); 7809 ap = anon_get_ptr(amp->ahp, anon_index); 7810 if (ap != NULL) { 7811 swap_xlate(ap, &vp, &off); 7812 } else { 7813 if (svd->vp == NULL && 7814 (svd->flags & MAP_NORESERVE)) { 7815 anon_array_exit(&cookie); 7816 ANON_LOCK_EXIT(&->a_rwlock); 7817 continue; 7818 } 7819 vp = svd->vp; 7820 off = offset; 7821 } 7822 if (op != MC_LOCK || ap == NULL) { 7823 anon_array_exit(&cookie); 7824 ANON_LOCK_EXIT(&->a_rwlock); 7825 } 7826 } else { 7827 vp = svd->vp; 7828 off = offset; 7829 } 7830 7831 /* 7832 * Get page frame. It's ok if the page is 7833 * not available when we're unlocking, as this 7834 * may simply mean that a page we locked got 7835 * truncated out of existence after we locked it. 7836 * 7837 * Invoke VOP_GETPAGE() to obtain the page struct 7838 * since we may need to read it from disk if its 7839 * been paged out. 7840 */ 7841 if (op != MC_LOCK) 7842 pp = page_lookup(vp, off, SE_SHARED); 7843 else { 7844 page_t *pl[1 + 1]; 7845 int error; 7846 7847 ASSERT(vp != NULL); 7848 7849 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7850 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7851 S_OTHER, svd->cred, NULL); 7852 7853 if (error && ap != NULL) { 7854 anon_array_exit(&cookie); 7855 ANON_LOCK_EXIT(&->a_rwlock); 7856 } 7857 7858 /* 7859 * If the error is EDEADLK then we must bounce 7860 * up and drop all vm subsystem locks and then 7861 * retry the operation later 7862 * This behavior is a temporary measure because 7863 * ufs/sds logging is badly designed and will 7864 * deadlock if we don't allow this bounce to 7865 * happen. The real solution is to re-design 7866 * the logging code to work properly. See bug 7867 * 4125102 for details of the problem. 7868 */ 7869 if (error == EDEADLK) { 7870 err = error; 7871 goto out; 7872 } 7873 /* 7874 * Quit if we fail to fault in the page. Treat 7875 * the failure as an error, unless the addr 7876 * is mapped beyond the end of a file. 7877 */ 7878 if (error && svd->vp) { 7879 va.va_mask = AT_SIZE; 7880 if (VOP_GETATTR(svd->vp, &va, 0, 7881 svd->cred, NULL) != 0) { 7882 err = EIO; 7883 goto out; 7884 } 7885 if (btopr(va.va_size) >= 7886 btopr(off + 1)) { 7887 err = EIO; 7888 goto out; 7889 } 7890 goto out; 7891 7892 } else if (error) { 7893 err = EIO; 7894 goto out; 7895 } 7896 pp = pl[0]; 7897 ASSERT(pp != NULL); 7898 } 7899 7900 /* 7901 * See Statement at the beginning of this routine. 7902 * 7903 * claim is always set if MAP_PRIVATE and PROT_WRITE 7904 * irrespective of following factors: 7905 * 7906 * (1) anon slots are populated or not 7907 * (2) cow is broken or not 7908 * (3) refcnt on ap is 1 or greater than 1 7909 * 7910 * See 4140683 for details 7911 */ 7912 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7913 (svd->type == MAP_PRIVATE)); 7914 7915 /* 7916 * Perform page-level operation appropriate to 7917 * operation. If locking, undo the SOFTLOCK 7918 * performed to bring the page into memory 7919 * after setting the lock. If unlocking, 7920 * and no page was found, account for the claim 7921 * separately. 
7922 */ 7923 if (op == MC_LOCK) { 7924 int ret = 1; /* Assume success */ 7925 7926 ASSERT(!VPP_ISPPLOCK(vpp)); 7927 7928 ret = page_pp_lock(pp, claim, 0); 7929 if (ap != NULL) { 7930 if (ap->an_pvp != NULL) { 7931 anon_swap_free(ap, pp); 7932 } 7933 anon_array_exit(&cookie); 7934 ANON_LOCK_EXIT(&->a_rwlock); 7935 } 7936 if (ret == 0) { 7937 /* locking page failed */ 7938 page_unlock(pp); 7939 err = EAGAIN; 7940 goto out; 7941 } 7942 VPP_SETPPLOCK(vpp); 7943 if (sp != NULL) { 7944 if (pp->p_lckcnt == 1) 7945 locked_bytes += PAGESIZE; 7946 } else 7947 locked_bytes += PAGESIZE; 7948 7949 if (lockmap != (ulong_t *)NULL) 7950 BT_SET(lockmap, pos); 7951 7952 page_unlock(pp); 7953 } else { 7954 ASSERT(VPP_ISPPLOCK(vpp)); 7955 if (pp != NULL) { 7956 /* sysV pages should be locked */ 7957 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7958 page_pp_unlock(pp, claim, 0); 7959 if (sp != NULL) { 7960 if (pp->p_lckcnt == 0) 7961 unlocked_bytes 7962 += PAGESIZE; 7963 } else 7964 unlocked_bytes += PAGESIZE; 7965 page_unlock(pp); 7966 } else { 7967 ASSERT(sp == NULL); 7968 unlocked_bytes += PAGESIZE; 7969 } 7970 VPP_CLRPPLOCK(vpp); 7971 } 7972 } 7973 } 7974 out: 7975 if (op == MC_LOCK) { 7976 /* Credit back bytes that did not get locked */ 7977 if ((unlocked_bytes - locked_bytes) > 0) { 7978 if (proj == NULL) 7979 mutex_enter(&p->p_lock); 7980 rctl_decr_locked_mem(p, proj, 7981 (unlocked_bytes - locked_bytes), chargeproc); 7982 if (proj == NULL) 7983 mutex_exit(&p->p_lock); 7984 } 7985 7986 } else { 7987 /* Account bytes that were unlocked */ 7988 if (unlocked_bytes > 0) { 7989 if (proj == NULL) 7990 mutex_enter(&p->p_lock); 7991 rctl_decr_locked_mem(p, proj, unlocked_bytes, 7992 chargeproc); 7993 if (proj == NULL) 7994 mutex_exit(&p->p_lock); 7995 } 7996 } 7997 if (sp != NULL) 7998 mutex_exit(&sp->shm_mlock); 7999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8000 8001 return (err); 8002 } 8003 8004 /* 8005 * Set advice from user for specified pages 8006 * There are 5 types of advice: 8007 * MADV_NORMAL - Normal (default) behavior (whatever that is) 8008 * MADV_RANDOM - Random page references 8009 * do not allow readahead or 'klustering' 8010 * MADV_SEQUENTIAL - Sequential page references 8011 * Pages previous to the one currently being 8012 * accessed (determined by fault) are 'not needed' 8013 * and are freed immediately 8014 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 8015 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 8016 * MADV_FREE - Contents can be discarded 8017 * MADV_ACCESS_DEFAULT- Default access 8018 * MADV_ACCESS_LWP - Next LWP will access heavily 8019 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 8020 */ 8021 static int 8022 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8023 { 8024 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8025 size_t page; 8026 int err = 0; 8027 int already_set; 8028 struct anon_map *amp; 8029 ulong_t anon_index; 8030 struct seg *next; 8031 lgrp_mem_policy_t policy; 8032 struct seg *prev; 8033 struct vnode *vp; 8034 8035 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8036 8037 /* 8038 * In case of MADV_FREE, we won't be modifying any segment private 8039 * data structures; so, we only need to grab READER's lock 8040 */ 8041 if (behav != MADV_FREE) { 8042 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8043 if (svd->tr_state != SEGVN_TR_OFF) { 8044 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8045 return (0); 8046 } 8047 } else { 8048 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, 
		    RW_READER);
	}

	/*
	 * Large pages are assumed to be only turned on when accesses to the
	 * segment's address range have spatial and temporal locality. That
	 * justifies ignoring MADV_SEQUENTIAL for large page segments.
	 * Also, ignore advice affecting lgroup memory allocation
	 * if we don't need to do lgroup optimizations on this system.
	 */

	if ((behav == MADV_SEQUENTIAL &&
	    (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
	    (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
	    behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (0);
	}

	if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
	    behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
		/*
		 * Since we are going to unload hat mappings
		 * we first have to flush the cache. Otherwise
		 * this might lead to system panic if another
		 * thread is doing physio on the range whose
		 * mappings are unloaded by madvise(3C).
		 */
		if (svd->softlockcnt > 0) {
			/*
			 * If this is a shared segment, a non 0 softlockcnt
			 * means locked pages are still in use.
			 */
			if (svd->type == MAP_SHARED) {
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
				return (EAGAIN);
			}
			/*
			 * Since we do have the segvn writers lock
			 * nobody can fill the cache with entries
			 * belonging to this seg during the purge.
			 * The flush either succeeds or we still
			 * have pending I/Os. In the latter case,
			 * madvise(3C) fails.
			 */
			segvn_purge(seg);
			if (svd->softlockcnt > 0) {
				/*
				 * Since madvise(3C) is advisory and
				 * it's not part of UNIX98, madvise(3C)
				 * failure here doesn't cause any hardship.
				 * Note that we don't block in "as" layer.
				 */
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
				return (EAGAIN);
			}
		} else if (svd->type == MAP_SHARED && svd->amp != NULL &&
		    svd->amp->a_softlockcnt > 0) {
			/*
			 * Try to purge this amp's entries from pcache. It
			 * will succeed only if other segments that share the
			 * amp have no outstanding softlocks.
			 */
			segvn_purge(seg);
		}
	}

	amp = svd->amp;
	vp = svd->vp;
	if (behav == MADV_FREE) {
		/*
		 * MADV_FREE is not supported for segments with an
		 * underlying object; if anonmap is NULL, anon slots
		 * are not yet populated and there is nothing for
		 * us to do. As MADV_FREE is advisory, we don't
		 * return error in either case.
		 */
		if (vp != NULL || amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (0);
		}

		segvn_purge(seg);

		page = seg_page(seg, addr);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, svd->anon_index + page, len);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (0);
	}

	/*
	 * If advice is to be applied to the entire segment,
	 * use the advice field in the seg_data structure;
	 * otherwise use the appropriate vpage entry.
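	 *
	 * For illustration only (a hypothetical user-level call, not part of
	 * this file): something like
	 *
	 *	madvise(seg_base + 2 * PAGESIZE, PAGESIZE, MADV_RANDOM);
	 *
	 * covers only part of the segment and therefore takes the vpage
	 * branch below, while the same advice applied to the whole range
	 * [s_base, s_base + s_size) simply updates svd->advice.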
8144 */ 8145 if ((addr == seg->s_base) && (len == seg->s_size)) { 8146 switch (behav) { 8147 case MADV_ACCESS_LWP: 8148 case MADV_ACCESS_MANY: 8149 case MADV_ACCESS_DEFAULT: 8150 /* 8151 * Set memory allocation policy for this segment 8152 */ 8153 policy = lgrp_madv_to_policy(behav, len, svd->type); 8154 if (svd->type == MAP_SHARED) 8155 already_set = lgrp_shm_policy_set(policy, amp, 8156 svd->anon_index, vp, svd->offset, len); 8157 else { 8158 /* 8159 * For private memory, need writers lock on 8160 * address space because the segment may be 8161 * split or concatenated when changing policy 8162 */ 8163 if (AS_READ_HELD(seg->s_as, 8164 &seg->s_as->a_lock)) { 8165 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8166 return (IE_RETRY); 8167 } 8168 8169 already_set = lgrp_privm_policy_set(policy, 8170 &svd->policy_info, len); 8171 } 8172 8173 /* 8174 * If policy set already and it shouldn't be reapplied, 8175 * don't do anything. 8176 */ 8177 if (already_set && 8178 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8179 break; 8180 8181 /* 8182 * Mark any existing pages in given range for 8183 * migration 8184 */ 8185 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8186 vp, svd->offset, 1); 8187 8188 /* 8189 * If same policy set already or this is a shared 8190 * memory segment, don't need to try to concatenate 8191 * segment with adjacent ones. 8192 */ 8193 if (already_set || svd->type == MAP_SHARED) 8194 break; 8195 8196 /* 8197 * Try to concatenate this segment with previous 8198 * one and next one, since we changed policy for 8199 * this one and it may be compatible with adjacent 8200 * ones now. 8201 */ 8202 prev = AS_SEGPREV(seg->s_as, seg); 8203 next = AS_SEGNEXT(seg->s_as, seg); 8204 8205 if (next && next->s_ops == &segvn_ops && 8206 addr + len == next->s_base) 8207 (void) segvn_concat(seg, next, 1); 8208 8209 if (prev && prev->s_ops == &segvn_ops && 8210 addr == prev->s_base + prev->s_size) { 8211 /* 8212 * Drop lock for private data of current 8213 * segment before concatenating (deleting) it 8214 * and return IE_REATTACH to tell as_ctl() that 8215 * current segment has changed 8216 */ 8217 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8218 if (!segvn_concat(prev, seg, 1)) 8219 err = IE_REATTACH; 8220 8221 return (err); 8222 } 8223 break; 8224 8225 case MADV_SEQUENTIAL: 8226 /* 8227 * unloading mapping guarantees 8228 * detection in segvn_fault 8229 */ 8230 ASSERT(seg->s_szc == 0); 8231 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8232 hat_unload(seg->s_as->a_hat, addr, len, 8233 HAT_UNLOAD); 8234 /* FALLTHROUGH */ 8235 case MADV_NORMAL: 8236 case MADV_RANDOM: 8237 svd->advice = (uchar_t)behav; 8238 svd->pageadvice = 0; 8239 break; 8240 case MADV_WILLNEED: /* handled in memcntl */ 8241 case MADV_DONTNEED: /* handled in memcntl */ 8242 case MADV_FREE: /* handled above */ 8243 break; 8244 default: 8245 err = EINVAL; 8246 } 8247 } else { 8248 caddr_t eaddr; 8249 struct seg *new_seg; 8250 struct segvn_data *new_svd; 8251 u_offset_t off; 8252 caddr_t oldeaddr; 8253 8254 page = seg_page(seg, addr); 8255 8256 segvn_vpage(seg); 8257 if (svd->vpage == NULL) { 8258 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8259 return (ENOMEM); 8260 } 8261 8262 switch (behav) { 8263 struct vpage *bvpp, *evpp; 8264 8265 case MADV_ACCESS_LWP: 8266 case MADV_ACCESS_MANY: 8267 case MADV_ACCESS_DEFAULT: 8268 /* 8269 * Set memory allocation policy for portion of this 8270 * segment 8271 */ 8272 8273 /* 8274 * Align address and length of advice to page 8275 * boundaries for large pages 8276 */ 8277 if (seg->s_szc != 0) { 8278 
size_t pgsz; 8279 8280 pgsz = page_get_pagesize(seg->s_szc); 8281 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8282 len = P2ROUNDUP(len, pgsz); 8283 } 8284 8285 /* 8286 * Check to see whether policy is set already 8287 */ 8288 policy = lgrp_madv_to_policy(behav, len, svd->type); 8289 8290 anon_index = svd->anon_index + page; 8291 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8292 8293 if (svd->type == MAP_SHARED) 8294 already_set = lgrp_shm_policy_set(policy, amp, 8295 anon_index, vp, off, len); 8296 else 8297 already_set = 8298 (policy == svd->policy_info.mem_policy); 8299 8300 /* 8301 * If policy set already and it shouldn't be reapplied, 8302 * don't do anything. 8303 */ 8304 if (already_set && 8305 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8306 break; 8307 8308 /* 8309 * For private memory, need writers lock on 8310 * address space because the segment may be 8311 * split or concatenated when changing policy 8312 */ 8313 if (svd->type == MAP_PRIVATE && 8314 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8315 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8316 return (IE_RETRY); 8317 } 8318 8319 /* 8320 * Mark any existing pages in given range for 8321 * migration 8322 */ 8323 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8324 vp, svd->offset, 1); 8325 8326 /* 8327 * Don't need to try to split or concatenate 8328 * segments, since policy is same or this is a shared 8329 * memory segment 8330 */ 8331 if (already_set || svd->type == MAP_SHARED) 8332 break; 8333 8334 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8335 ASSERT(svd->amp == NULL); 8336 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8337 ASSERT(svd->softlockcnt == 0); 8338 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8339 HAT_REGION_TEXT); 8340 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8341 } 8342 8343 /* 8344 * Split off new segment if advice only applies to a 8345 * portion of existing segment starting in middle 8346 */ 8347 new_seg = NULL; 8348 eaddr = addr + len; 8349 oldeaddr = seg->s_base + seg->s_size; 8350 if (addr > seg->s_base) { 8351 /* 8352 * Must flush I/O page cache 8353 * before splitting segment 8354 */ 8355 if (svd->softlockcnt > 0) 8356 segvn_purge(seg); 8357 8358 /* 8359 * Split segment and return IE_REATTACH to tell 8360 * as_ctl() that current segment changed 8361 */ 8362 new_seg = segvn_split_seg(seg, addr); 8363 new_svd = (struct segvn_data *)new_seg->s_data; 8364 err = IE_REATTACH; 8365 8366 /* 8367 * If new segment ends where old one 8368 * did, try to concatenate the new 8369 * segment with next one. 8370 */ 8371 if (eaddr == oldeaddr) { 8372 /* 8373 * Set policy for new segment 8374 */ 8375 (void) lgrp_privm_policy_set(policy, 8376 &new_svd->policy_info, 8377 new_seg->s_size); 8378 8379 next = AS_SEGNEXT(new_seg->s_as, 8380 new_seg); 8381 8382 if (next && 8383 next->s_ops == &segvn_ops && 8384 eaddr == next->s_base) 8385 (void) segvn_concat(new_seg, 8386 next, 1); 8387 } 8388 } 8389 8390 /* 8391 * Split off end of existing segment if advice only 8392 * applies to a portion of segment ending before 8393 * end of the existing segment 8394 */ 8395 if (eaddr < oldeaddr) { 8396 /* 8397 * Must flush I/O page cache 8398 * before splitting segment 8399 */ 8400 if (svd->softlockcnt > 0) 8401 segvn_purge(seg); 8402 8403 /* 8404 * If beginning of old segment was already 8405 * split off, use new segment to split end off 8406 * from. 
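			 *
			 * Illustrative walk-through (numbers assumed, not
			 * from this file): for a 64K segment and advice on
			 * [s_base + 8K, s_base + 24K), the addr > s_base case
			 * above already split off a new segment at
			 * s_base + 8K; here eaddr (s_base + 24K) is below
			 * oldeaddr (s_base + 64K), so that new segment is
			 * split again at eaddr, leaving three segments with
			 * the middle one carrying the new policy.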
8407 */ 8408 if (new_seg != NULL && new_seg != seg) { 8409 /* 8410 * Split segment 8411 */ 8412 (void) segvn_split_seg(new_seg, eaddr); 8413 8414 /* 8415 * Set policy for new segment 8416 */ 8417 (void) lgrp_privm_policy_set(policy, 8418 &new_svd->policy_info, 8419 new_seg->s_size); 8420 } else { 8421 /* 8422 * Split segment and return IE_REATTACH 8423 * to tell as_ctl() that current 8424 * segment changed 8425 */ 8426 (void) segvn_split_seg(seg, eaddr); 8427 err = IE_REATTACH; 8428 8429 (void) lgrp_privm_policy_set(policy, 8430 &svd->policy_info, seg->s_size); 8431 8432 /* 8433 * If new segment starts where old one 8434 * did, try to concatenate it with 8435 * previous segment. 8436 */ 8437 if (addr == seg->s_base) { 8438 prev = AS_SEGPREV(seg->s_as, 8439 seg); 8440 8441 /* 8442 * Drop lock for private data 8443 * of current segment before 8444 * concatenating (deleting) it 8445 */ 8446 if (prev && 8447 prev->s_ops == 8448 &segvn_ops && 8449 addr == prev->s_base + 8450 prev->s_size) { 8451 SEGVN_LOCK_EXIT( 8452 seg->s_as, 8453 &svd->lock); 8454 (void) segvn_concat( 8455 prev, seg, 1); 8456 return (err); 8457 } 8458 } 8459 } 8460 } 8461 break; 8462 case MADV_SEQUENTIAL: 8463 ASSERT(seg->s_szc == 0); 8464 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8465 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8466 /* FALLTHROUGH */ 8467 case MADV_NORMAL: 8468 case MADV_RANDOM: 8469 bvpp = &svd->vpage[page]; 8470 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8471 for (; bvpp < evpp; bvpp++) 8472 VPP_SETADVICE(bvpp, behav); 8473 svd->advice = MADV_NORMAL; 8474 break; 8475 case MADV_WILLNEED: /* handled in memcntl */ 8476 case MADV_DONTNEED: /* handled in memcntl */ 8477 case MADV_FREE: /* handled above */ 8478 break; 8479 default: 8480 err = EINVAL; 8481 } 8482 } 8483 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8484 return (err); 8485 } 8486 8487 /* 8488 * Create a vpage structure for this seg. 8489 */ 8490 static void 8491 segvn_vpage(struct seg *seg) 8492 { 8493 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8494 struct vpage *vp, *evp; 8495 static pgcnt_t page_limit = 0; 8496 8497 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8498 8499 /* 8500 * If no vpage structure exists, allocate one. Copy the protections 8501 * and the advice from the segment itself to the individual pages. 8502 */ 8503 if (svd->vpage == NULL) { 8504 /* 8505 * Start by calculating the number of pages we must allocate to 8506 * track the per-page vpage structs needs for this entire 8507 * segment. If we know now that it will require more than our 8508 * heuristic for the maximum amount of kmem we can consume then 8509 * fail. We do this here, instead of trying to detect this deep 8510 * in page_resv and propagating the error up, since the entire 8511 * memory allocation stack is not amenable to passing this 8512 * back. Instead, it wants to keep trying. 8513 * 8514 * As a heuristic we set a page limit of 5/8s of total_pages 8515 * for this allocation. We use shifts so that no floating 8516 * point conversion takes place and only need to do the 8517 * calculation once. 
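		 *
		 * Worked example (illustrative numbers only): with
		 * total_pages == 0x100000 the limit computed below is
		 * (0x100000 >> 1) + (0x100000 >> 3) == 0x80000 + 0x20000 ==
		 * 0xa0000 pages, i.e. exactly 5/8 of total_pages; a segment
		 * whose vpage array would need more pages than that is left
		 * without per-page data (the caller sees svd->vpage still
		 * NULL).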
		 */
		ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
		pgcnt_t npages = mem_needed >> PAGESHIFT;

		if (page_limit == 0)
			page_limit = (total_pages >> 1) + (total_pages >> 3);

		if (npages > page_limit)
			return;

		svd->pageadvice = 1;
		svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
		evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
		for (vp = svd->vpage; vp < evp; vp++) {
			VPP_SETPROT(vp, svd->prot);
			VPP_SETADVICE(vp, svd->advice);
		}
	}
}

/*
 * Dump the pages belonging to this segvn segment.
 */
static void
segvn_dump(struct seg *seg)
{
	struct segvn_data *svd;
	page_t *pp;
	struct anon_map *amp;
	ulong_t	anon_index;
	struct vnode *vp;
	u_offset_t off, offset;
	pfn_t pfn;
	pgcnt_t page, npages;
	caddr_t addr;

	npages = seg_pages(seg);
	svd = (struct segvn_data *)seg->s_data;
	vp = svd->vp;
	off = offset = svd->offset;
	addr = seg->s_base;

	if ((amp = svd->amp) != NULL) {
		anon_index = svd->anon_index;
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	}

	for (page = 0; page < npages; page++, offset += PAGESIZE) {
		struct anon *ap;
		int we_own_it = 0;

		if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
			swap_xlate_nopanic(ap, &vp, &off);
		} else {
			vp = svd->vp;
			off = offset;
		}

		/*
		 * If pp == NULL, the page either does not exist
		 * or is exclusively locked.  So determine if it
		 * exists before searching for it.
		 */

		if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
			we_own_it = 1;
		else
			pp = page_exists(vp, off);

		if (pp) {
			pfn = page_pptonum(pp);
			dump_addpage(seg->s_as, addr, pfn);
			if (we_own_it)
				page_unlock(pp);
		}
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}

	if (amp != NULL)
		ANON_LOCK_EXIT(&amp->a_rwlock);
}

#ifdef DEBUG
static uint32_t	segvn_pglock_mtbf = 0;
#endif

#define	PCACHE_SHWLIST		((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

/*
 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
 * to avoid the overhead of per page locking and unlocking for subsequent IOs
 * to the same parts of the segment. Currently shadow list creation is only
 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
 * tagged with segment pointer, starting virtual address and length. This
 * approach for MAP_SHARED segments may add many pcache entries for the same
 * set of pages and lead to long hash chains that decrease pcache lookup
 * performance. To avoid this issue, for shared segments the shared anon map
 * and starting anon index are used for pcache entry tagging. This allows all
 * segments to share pcache entries for the same anon range and reduces pcache
 * chain length as well as memory overhead from duplicate shadow lists and
 * pcache entries.
 *
 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
 * part of softlockcnt accounting is done differently for private and shared
 * segments.
In private segment case softlock is only incremented when a new 8627 * shadow list is created but not when an existing one is found via 8628 * seg_plookup(). pcache entries have reference count incremented/decremented 8629 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8630 * reference count can be purged (and purging is needed before segment can be 8631 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8632 * decrement softlockcnt. Since in private segment case each of its pcache 8633 * entries only belongs to this segment we can expect that when 8634 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8635 * segment purge will succeed and softlockcnt will drop to 0. In shared 8636 * segment case reference count in pcache entry counts active locks from many 8637 * different segments so we can't expect segment purging to succeed even when 8638 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8639 * segment. To be able to determine when there're no pending pagelocks in 8640 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8641 * but instead softlockcnt is incremented and decremented for every 8642 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8643 * list was created or an existing one was found. When softlockcnt drops to 0 8644 * this segment no longer has any claims for pcached shadow lists and the 8645 * segment can be freed even if there're still active pcache entries 8646 * shared by this segment anon map. Shared segment pcache entries belong to 8647 * anon map and are typically removed when anon map is freed after all 8648 * processes destroy the segments that use this anon map. 8649 */ 8650 static int 8651 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8652 enum lock_type type, enum seg_rw rw) 8653 { 8654 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8655 size_t np; 8656 pgcnt_t adjustpages; 8657 pgcnt_t npages; 8658 ulong_t anon_index; 8659 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE; 8660 uint_t error; 8661 struct anon_map *amp; 8662 pgcnt_t anpgcnt; 8663 struct page **pplist, **pl, *pp; 8664 caddr_t a; 8665 size_t page; 8666 caddr_t lpgaddr, lpgeaddr; 8667 anon_sync_obj_t cookie; 8668 int anlock; 8669 struct anon_map *pamp; 8670 caddr_t paddr; 8671 seg_preclaim_cbfunc_t preclaim_callback; 8672 size_t pgsz; 8673 int use_pcache; 8674 size_t wlen; 8675 uint_t pflags = 0; 8676 int sftlck_sbase = 0; 8677 int sftlck_send = 0; 8678 8679 #ifdef DEBUG 8680 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8681 hrtime_t ts = gethrtime(); 8682 if ((ts % segvn_pglock_mtbf) == 0) { 8683 return (ENOTSUP); 8684 } 8685 if ((ts % segvn_pglock_mtbf) == 1) { 8686 return (EFAULT); 8687 } 8688 } 8689 #endif 8690 8691 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8692 "segvn_pagelock: start seg %p addr %p", seg, addr); 8693 8694 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8695 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8696 8697 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8698 8699 /* 8700 * for now we only support pagelock to anon memory. We would have to 8701 * check protections for vnode objects and call into the vnode driver. 8702 * That's too much for a fast path. Let the fault entry point handle 8703 * it. 
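	 *
	 * (Hedged cross-reference, not a guarantee of this file: when this
	 * returns ENOTSUP the caller in the as layer is expected to fall
	 * back to locking the range via F_SOFTLOCK faults and to hand the
	 * I/O code a NULL shadow list instead of a cached one.)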
8704 */ 8705 if (svd->vp != NULL) { 8706 if (type == L_PAGELOCK) { 8707 error = ENOTSUP; 8708 goto out; 8709 } 8710 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8711 } 8712 if ((amp = svd->amp) == NULL) { 8713 if (type == L_PAGELOCK) { 8714 error = EFAULT; 8715 goto out; 8716 } 8717 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8718 } 8719 if (rw != S_READ && rw != S_WRITE) { 8720 if (type == L_PAGELOCK) { 8721 error = ENOTSUP; 8722 goto out; 8723 } 8724 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8725 } 8726 8727 if (seg->s_szc != 0) { 8728 /* 8729 * We are adjusting the pagelock region to the large page size 8730 * boundary because the unlocked part of a large page cannot 8731 * be freed anyway unless all constituent pages of a large 8732 * page are locked. Bigger regions reduce pcache chain length 8733 * and improve lookup performance. The tradeoff is that the 8734 * very first segvn_pagelock() call for a given page is more 8735 * expensive if only 1 page_t is needed for IO. This is only 8736 * an issue if pcache entry doesn't get reused by several 8737 * subsequent calls. We optimize here for the case when pcache 8738 * is heavily used by repeated IOs to the same address range. 8739 * 8740 * Note segment's page size cannot change while we are holding 8741 * as lock. And then it cannot change while softlockcnt is 8742 * not 0. This will allow us to correctly recalculate large 8743 * page size region for the matching pageunlock/reclaim call 8744 * since as_pageunlock() caller must always match 8745 * as_pagelock() call's addr and len. 8746 * 8747 * For pageunlock *ppp points to the pointer of page_t that 8748 * corresponds to the real unadjusted start address. Similar 8749 * for pagelock *ppp must point to the pointer of page_t that 8750 * corresponds to the real unadjusted start address. 8751 */ 8752 pgsz = page_get_pagesize(seg->s_szc); 8753 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8754 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8755 } else if (len < segvn_pglock_comb_thrshld) { 8756 lpgaddr = addr; 8757 lpgeaddr = addr + len; 8758 adjustpages = 0; 8759 pgsz = PAGESIZE; 8760 } else { 8761 /* 8762 * Align the address range of large enough requests to allow 8763 * combining of different shadow lists into 1 to reduce memory 8764 * overhead from potentially overlapping large shadow lists 8765 * (worst case is we have a 1MB IO into buffers with start 8766 * addresses separated by 4K). Alignment is only possible if 8767 * padded chunks have sufficient access permissions. Note 8768 * permissions won't change between L_PAGELOCK and 8769 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8770 * segvn_setprot() to wait until softlockcnt drops to 0. This 8771 * allows us to determine in L_PAGEUNLOCK the same range we 8772 * computed in L_PAGELOCK. 8773 * 8774 * If alignment is limited by segment ends set 8775 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8776 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8777 * per segment counters. In L_PAGEUNLOCK case decrease 8778 * softlockcnt_sbase/softlockcnt_send counters if 8779 * sftlck_sbase/sftlck_send flags are set. When 8780 * softlockcnt_sbase/softlockcnt_send are non 0 8781 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8782 * won't merge the segments. 
This restriction combined with 8783 * restriction on segment unmapping and splitting for segments 8784 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8785 * correctly determine the same range that was previously 8786 * locked by matching L_PAGELOCK. 8787 */ 8788 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8789 pgsz = PAGESIZE; 8790 if (svd->type == MAP_PRIVATE) { 8791 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8792 segvn_pglock_comb_balign); 8793 if (lpgaddr < seg->s_base) { 8794 lpgaddr = seg->s_base; 8795 sftlck_sbase = 1; 8796 } 8797 } else { 8798 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8799 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8800 if (aaix < svd->anon_index) { 8801 lpgaddr = seg->s_base; 8802 sftlck_sbase = 1; 8803 } else { 8804 lpgaddr = addr - ptob(aix - aaix); 8805 ASSERT(lpgaddr >= seg->s_base); 8806 } 8807 } 8808 if (svd->pageprot && lpgaddr != addr) { 8809 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8810 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8811 while (vp < evp) { 8812 if ((VPP_PROT(vp) & protchk) == 0) { 8813 break; 8814 } 8815 vp++; 8816 } 8817 if (vp < evp) { 8818 lpgaddr = addr; 8819 pflags = 0; 8820 } 8821 } 8822 lpgeaddr = addr + len; 8823 if (pflags) { 8824 if (svd->type == MAP_PRIVATE) { 8825 lpgeaddr = (caddr_t)P2ROUNDUP( 8826 (uintptr_t)lpgeaddr, 8827 segvn_pglock_comb_balign); 8828 } else { 8829 ulong_t aix = svd->anon_index + 8830 seg_page(seg, lpgeaddr); 8831 ulong_t aaix = P2ROUNDUP(aix, 8832 segvn_pglock_comb_palign); 8833 if (aaix < aix) { 8834 lpgeaddr = 0; 8835 } else { 8836 lpgeaddr += ptob(aaix - aix); 8837 } 8838 } 8839 if (lpgeaddr == 0 || 8840 lpgeaddr > seg->s_base + seg->s_size) { 8841 lpgeaddr = seg->s_base + seg->s_size; 8842 sftlck_send = 1; 8843 } 8844 } 8845 if (svd->pageprot && lpgeaddr != addr + len) { 8846 struct vpage *vp; 8847 struct vpage *evp; 8848 8849 vp = &svd->vpage[seg_page(seg, addr + len)]; 8850 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8851 8852 while (vp < evp) { 8853 if ((VPP_PROT(vp) & protchk) == 0) { 8854 break; 8855 } 8856 vp++; 8857 } 8858 if (vp < evp) { 8859 lpgeaddr = addr + len; 8860 } 8861 } 8862 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8863 } 8864 8865 /* 8866 * For MAP_SHARED segments we create pcache entries tagged by amp and 8867 * anon index so that we can share pcache entries with other segments 8868 * that map this amp. For private segments pcache entries are tagged 8869 * with segment and virtual address. 8870 */ 8871 if (svd->type == MAP_SHARED) { 8872 pamp = amp; 8873 paddr = (caddr_t)((lpgaddr - seg->s_base) + 8874 ptob(svd->anon_index)); 8875 preclaim_callback = shamp_reclaim; 8876 } else { 8877 pamp = NULL; 8878 paddr = lpgaddr; 8879 preclaim_callback = segvn_reclaim; 8880 } 8881 8882 if (type == L_PAGEUNLOCK) { 8883 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8884 8885 /* 8886 * update hat ref bits for /proc. We need to make sure 8887 * that threads tracing the ref and mod bits of the 8888 * address space get the right data. 8889 * Note: page ref and mod bits are updated at reclaim time 8890 */ 8891 if (seg->s_as->a_vbits) { 8892 for (a = addr; a < addr + len; a += PAGESIZE) { 8893 if (rw == S_WRITE) { 8894 hat_setstat(seg->s_as, a, 8895 PAGESIZE, P_REF | P_MOD); 8896 } else { 8897 hat_setstat(seg->s_as, a, 8898 PAGESIZE, P_REF); 8899 } 8900 } 8901 } 8902 8903 /* 8904 * Check the shadow list entry after the last page used in 8905 * this IO request. 
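		 * (That entry is the sentinel slot pl[btop(len)] appended by
		 * the L_PAGELOCK path further below when it allocates the
		 * list; quoted here purely as a cross-reference to that code:
		 *
		 *	pplist = kmem_alloc(sizeof (page_t *) * (npages + 1),
		 *	    KM_SLEEP);
		 *	pl = pplist;
		 *	pl[npages] = use_pcache ?
		 *	    PCACHE_SHWLIST : NOPCACHE_SHWLIST;
		 *
		 * )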
If it's NOPCACHE_SHWLIST the shadow list 8906 * was not inserted into pcache and is not large page 8907 * adjusted. In this case call reclaim callback directly and 8908 * don't adjust the shadow list start and size for large 8909 * pages. 8910 */ 8911 npages = btop(len); 8912 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 8913 void *ptag; 8914 if (pamp != NULL) { 8915 ASSERT(svd->type == MAP_SHARED); 8916 ptag = (void *)pamp; 8917 paddr = (caddr_t)((addr - seg->s_base) + 8918 ptob(svd->anon_index)); 8919 } else { 8920 ptag = (void *)seg; 8921 paddr = addr; 8922 } 8923 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 8924 } else { 8925 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 8926 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 8927 len = lpgeaddr - lpgaddr; 8928 npages = btop(len); 8929 seg_pinactive(seg, pamp, paddr, len, 8930 *ppp - adjustpages, rw, pflags, preclaim_callback); 8931 } 8932 8933 if (pamp != NULL) { 8934 ASSERT(svd->type == MAP_SHARED); 8935 ASSERT(svd->softlockcnt >= npages); 8936 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 8937 } 8938 8939 if (sftlck_sbase) { 8940 ASSERT(svd->softlockcnt_sbase > 0); 8941 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase); 8942 } 8943 if (sftlck_send) { 8944 ASSERT(svd->softlockcnt_send > 0); 8945 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send); 8946 } 8947 8948 /* 8949 * If someone is blocked while unmapping, we purge 8950 * segment page cache and thus reclaim pplist synchronously 8951 * without waiting for seg_pasync_thread. This speeds up 8952 * unmapping in cases where munmap(2) is called, while 8953 * raw async i/o is still in progress or where a thread 8954 * exits on data fault in a multithreaded application. 8955 */ 8956 if (AS_ISUNMAPWAIT(seg->s_as)) { 8957 if (svd->softlockcnt == 0) { 8958 mutex_enter(&seg->s_as->a_contents); 8959 if (AS_ISUNMAPWAIT(seg->s_as)) { 8960 AS_CLRUNMAPWAIT(seg->s_as); 8961 cv_broadcast(&seg->s_as->a_cv); 8962 } 8963 mutex_exit(&seg->s_as->a_contents); 8964 } else if (pamp == NULL) { 8965 /* 8966 * softlockcnt is not 0 and this is a 8967 * MAP_PRIVATE segment. Try to purge its 8968 * pcache entries to reduce softlockcnt. 8969 * If it drops to 0 segvn_reclaim() 8970 * will wake up a thread waiting on 8971 * unmapwait flag. 8972 * 8973 * We don't purge MAP_SHARED segments with non 8974 * 0 softlockcnt since IO is still in progress 8975 * for such segments. 8976 */ 8977 ASSERT(svd->type == MAP_PRIVATE); 8978 segvn_purge(seg); 8979 } 8980 } 8981 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8982 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8983 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8984 return (0); 8985 } 8986 8987 /* The L_PAGELOCK case ... */ 8988 8989 VM_STAT_ADD(segvnvmstats.pagelock[1]); 8990 8991 /* 8992 * For MAP_SHARED segments we have to check protections before 8993 * seg_plookup() since pcache entries may be shared by many segments 8994 * with potentially different page protections. 
8995 */ 8996 if (pamp != NULL) { 8997 ASSERT(svd->type == MAP_SHARED); 8998 if (svd->pageprot == 0) { 8999 if ((svd->prot & protchk) == 0) { 9000 error = EACCES; 9001 goto out; 9002 } 9003 } else { 9004 /* 9005 * check page protections 9006 */ 9007 caddr_t ea; 9008 9009 if (seg->s_szc) { 9010 a = lpgaddr; 9011 ea = lpgeaddr; 9012 } else { 9013 a = addr; 9014 ea = addr + len; 9015 } 9016 for (; a < ea; a += pgsz) { 9017 struct vpage *vp; 9018 9019 ASSERT(seg->s_szc == 0 || 9020 sameprot(seg, a, pgsz)); 9021 vp = &svd->vpage[seg_page(seg, a)]; 9022 if ((VPP_PROT(vp) & protchk) == 0) { 9023 error = EACCES; 9024 goto out; 9025 } 9026 } 9027 } 9028 } 9029 9030 /* 9031 * try to find pages in segment page cache 9032 */ 9033 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 9034 if (pplist != NULL) { 9035 if (pamp != NULL) { 9036 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 9037 ASSERT(svd->type == MAP_SHARED); 9038 atomic_add_long((ulong_t *)&svd->softlockcnt, 9039 npages); 9040 } 9041 if (sftlck_sbase) { 9042 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9043 } 9044 if (sftlck_send) { 9045 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9046 } 9047 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9048 *ppp = pplist + adjustpages; 9049 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 9050 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 9051 return (0); 9052 } 9053 9054 /* 9055 * For MAP_SHARED segments we already verified above that segment 9056 * protections allow this pagelock operation. 9057 */ 9058 if (pamp == NULL) { 9059 ASSERT(svd->type == MAP_PRIVATE); 9060 if (svd->pageprot == 0) { 9061 if ((svd->prot & protchk) == 0) { 9062 error = EACCES; 9063 goto out; 9064 } 9065 if (svd->prot & PROT_WRITE) { 9066 wlen = lpgeaddr - lpgaddr; 9067 } else { 9068 wlen = 0; 9069 ASSERT(rw == S_READ); 9070 } 9071 } else { 9072 int wcont = 1; 9073 /* 9074 * check page protections 9075 */ 9076 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 9077 struct vpage *vp; 9078 9079 ASSERT(seg->s_szc == 0 || 9080 sameprot(seg, a, pgsz)); 9081 vp = &svd->vpage[seg_page(seg, a)]; 9082 if ((VPP_PROT(vp) & protchk) == 0) { 9083 error = EACCES; 9084 goto out; 9085 } 9086 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 9087 wlen += pgsz; 9088 } else { 9089 wcont = 0; 9090 ASSERT(rw == S_READ); 9091 } 9092 } 9093 } 9094 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 9095 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9096 } 9097 9098 /* 9099 * Only build large page adjusted shadow list if we expect to insert 9100 * it into pcache. For large enough pages it's a big overhead to 9101 * create a shadow list of the entire large page. But this overhead 9102 * should be amortized over repeated pcache hits on subsequent reuse 9103 * of this shadow list (IO into any range within this shadow list will 9104 * find it in pcache since we large page align the request for pcache 9105 * lookups). pcache performance is improved with bigger shadow lists 9106 * as it reduces the time to pcache the entire big segment and reduces 9107 * pcache chain length. 9108 */ 9109 if (seg_pinsert_check(seg, pamp, paddr, 9110 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9111 addr = lpgaddr; 9112 len = lpgeaddr - lpgaddr; 9113 use_pcache = 1; 9114 } else { 9115 use_pcache = 0; 9116 /* 9117 * Since this entry will not be inserted into the pcache, we 9118 * will not do any adjustments to the starting address or 9119 * size of the memory to be locked. 
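		 *
		 * (For contrast, an illustrative large-page case with assumed
		 * numbers: with pgsz == 4M and an 8K request starting 16K
		 * into the large page, the range is widened to cover the
		 * whole 4M, adjustpages becomes btop(addr - lpgaddr) == 4,
		 * and the caller still gets *ppp == pplist + 4, i.e. a
		 * pointer to the page_t of the real, unadjusted start
		 * address.)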
9120 */ 9121 adjustpages = 0; 9122 } 9123 npages = btop(len); 9124 9125 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9126 pl = pplist; 9127 *ppp = pplist + adjustpages; 9128 /* 9129 * If use_pcache is 0 this shadow list is not large page adjusted. 9130 * Record this info in the last entry of shadow array so that 9131 * L_PAGEUNLOCK can determine if it should large page adjust the 9132 * address range to find the real range that was locked. 9133 */ 9134 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9135 9136 page = seg_page(seg, addr); 9137 anon_index = svd->anon_index + page; 9138 9139 anlock = 0; 9140 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 9141 ASSERT(amp->a_szc >= seg->s_szc); 9142 anpgcnt = page_get_pagecnt(amp->a_szc); 9143 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9144 struct anon *ap; 9145 struct vnode *vp; 9146 u_offset_t off; 9147 9148 /* 9149 * Lock and unlock anon array only once per large page. 9150 * anon_array_enter() locks the root anon slot according to 9151 * a_szc which can't change while anon map is locked. We lock 9152 * anon the first time through this loop and each time we 9153 * reach anon index that corresponds to a root of a large 9154 * page. 9155 */ 9156 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9157 ASSERT(anlock == 0); 9158 anon_array_enter(amp, anon_index, &cookie); 9159 anlock = 1; 9160 } 9161 ap = anon_get_ptr(amp->ahp, anon_index); 9162 9163 /* 9164 * We must never use seg_pcache for COW pages 9165 * because we might end up with original page still 9166 * lying in seg_pcache even after private page is 9167 * created. This leads to data corruption as 9168 * aio_write refers to the page still in cache 9169 * while all other accesses refer to the private 9170 * page. 9171 */ 9172 if (ap == NULL || ap->an_refcnt != 1) { 9173 struct vpage *vpage; 9174 9175 if (seg->s_szc) { 9176 error = EFAULT; 9177 break; 9178 } 9179 if (svd->vpage != NULL) { 9180 vpage = &svd->vpage[seg_page(seg, a)]; 9181 } else { 9182 vpage = NULL; 9183 } 9184 ASSERT(anlock); 9185 anon_array_exit(&cookie); 9186 anlock = 0; 9187 pp = NULL; 9188 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9189 vpage, &pp, 0, F_INVAL, rw, 1); 9190 if (error) { 9191 error = fc_decode(error); 9192 break; 9193 } 9194 anon_array_enter(amp, anon_index, &cookie); 9195 anlock = 1; 9196 ap = anon_get_ptr(amp->ahp, anon_index); 9197 if (ap == NULL || ap->an_refcnt != 1) { 9198 error = EFAULT; 9199 break; 9200 } 9201 } 9202 swap_xlate(ap, &vp, &off); 9203 pp = page_lookup_nowait(vp, off, SE_SHARED); 9204 if (pp == NULL) { 9205 error = EFAULT; 9206 break; 9207 } 9208 if (ap->an_pvp != NULL) { 9209 anon_swap_free(ap, pp); 9210 } 9211 /* 9212 * Unlock anon if this is the last slot in a large page. 
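		 *
		 * (Worked example with an assumed anpgcnt of 8: anon indices
		 * 0..7 share one root slot, anon_array_enter() is taken at
		 * index 0 (P2PHASE(0, 8) == 0) and anon_array_exit() is done
		 * at index 7 (P2PHASE(7, 8) == 7 == anpgcnt - 1), so the anon
		 * array lock is toggled once per large page rather than once
		 * per PAGESIZE page.)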
9213 */ 9214 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9215 ASSERT(anlock); 9216 anon_array_exit(&cookie); 9217 anlock = 0; 9218 } 9219 *pplist++ = pp; 9220 } 9221 if (anlock) { /* Ensure the lock is dropped */ 9222 anon_array_exit(&cookie); 9223 } 9224 ANON_LOCK_EXIT(&->a_rwlock); 9225 9226 if (a >= addr + len) { 9227 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9228 if (pamp != NULL) { 9229 ASSERT(svd->type == MAP_SHARED); 9230 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9231 npages); 9232 wlen = len; 9233 } 9234 if (sftlck_sbase) { 9235 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9236 } 9237 if (sftlck_send) { 9238 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9239 } 9240 if (use_pcache) { 9241 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9242 rw, pflags, preclaim_callback); 9243 } 9244 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9245 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9246 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9247 return (0); 9248 } 9249 9250 pplist = pl; 9251 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9252 while (np > (uint_t)0) { 9253 ASSERT(PAGE_LOCKED(*pplist)); 9254 page_unlock(*pplist); 9255 np--; 9256 pplist++; 9257 } 9258 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9259 out: 9260 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9261 *ppp = NULL; 9262 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9263 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9264 return (error); 9265 } 9266 9267 /* 9268 * purge any cached pages in the I/O page cache 9269 */ 9270 static void 9271 segvn_purge(struct seg *seg) 9272 { 9273 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9274 9275 /* 9276 * pcache is only used by pure anon segments. 9277 */ 9278 if (svd->amp == NULL || svd->vp != NULL) { 9279 return; 9280 } 9281 9282 /* 9283 * For MAP_SHARED segments non 0 segment's softlockcnt means 9284 * active IO is still in progress via this segment. So we only 9285 * purge MAP_SHARED segments when their softlockcnt is 0. 9286 */ 9287 if (svd->type == MAP_PRIVATE) { 9288 if (svd->softlockcnt) { 9289 seg_ppurge(seg, NULL, 0); 9290 } 9291 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9292 seg_ppurge(seg, svd->amp, 0); 9293 } 9294 } 9295 9296 /* 9297 * If async argument is not 0 we are called from pcache async thread and don't 9298 * hold AS lock. 9299 */ 9300 9301 /*ARGSUSED*/ 9302 static int 9303 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9304 enum seg_rw rw, int async) 9305 { 9306 struct seg *seg = (struct seg *)ptag; 9307 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9308 pgcnt_t np, npages; 9309 struct page **pl; 9310 9311 npages = np = btop(len); 9312 ASSERT(npages); 9313 9314 ASSERT(svd->vp == NULL && svd->amp != NULL); 9315 ASSERT(svd->softlockcnt >= npages); 9316 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9317 9318 pl = pplist; 9319 9320 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9321 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9322 9323 while (np > (uint_t)0) { 9324 if (rw == S_WRITE) { 9325 hat_setrefmod(*pplist); 9326 } else { 9327 hat_setref(*pplist); 9328 } 9329 page_unlock(*pplist); 9330 np--; 9331 pplist++; 9332 } 9333 9334 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9335 9336 /* 9337 * If we are pcache async thread we don't hold AS lock. This means if 9338 * softlockcnt drops to 0 after the decrement below address space may 9339 * get freed. 
We can't allow it since after softlock decrement to 0 we
 * still need to access as structure for possible wakeup of unmap
 * waiters. To prevent the disappearance of as we take this segment's
 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
 * make sure this routine completes before segment is freed.
 *
 * The second complication we have to deal with in async case is a
 * possibility of a missed wake up of unmap wait thread. When we don't
 * hold as lock here we may take a_contents lock before unmap wait
 * thread that was first to see softlockcnt was still not 0. As a
 * result we'll fail to wake up an unmap wait thread. To avoid this
 * race we set nounmapwait flag in as structure if we drop softlockcnt
 * to 0 when we were called by pcache async thread. unmapwait thread
 * will not block if this flag is set.
 */
	if (async) {
		mutex_enter(&svd->segfree_syncmtx);
	}

	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (async) {
				AS_SETNOUNMAPWAIT(seg->s_as);
			}
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}

	if (async) {
		mutex_exit(&svd->segfree_syncmtx);
	}
	return (0);
}

/*ARGSUSED*/
static int
shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	amp_t *amp = (amp_t *)ptag;
	pgcnt_t np, npages;
	struct page **pl;

	npages = np = btop(len);
	ASSERT(npages);
	ASSERT(amp->a_softlockcnt >= npages);

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);

	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
	 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
	 * and anonmap_purge() acquires a_purgemtx.
	 */
	mutex_enter(&amp->a_purgemtx);
	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
	    amp->a_purgewait) {
		amp->a_purgewait = 0;
		cv_broadcast(&amp->a_purgecv);
	}
	mutex_exit(&amp->a_purgemtx);
	return (0);
}

/*
 * get a memory ID for an addr in a given segment
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
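 *
 * A compact summary of the <val[0], val[1]> pairs returned below (a
 * restatement of the code, not a contract beyond it):
 *
 *	MAP_PRIVATE:		(as pointer,	virtual address)
 *	MAP_SHARED with vnode:	(vnode pointer,	file offset of addr)
 *	MAP_SHARED anonymous:	(anon pointer,	offset of addr within page)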
 */
static int
segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon	*ap = NULL;
	ulong_t		anon_index;
	struct anon_map	*amp;
	anon_sync_obj_t	cookie;

	if (svd->type == MAP_PRIVATE) {
		memidp->val[0] = (uintptr_t)seg->s_as;
		memidp->val[1] = (uintptr_t)addr;
		return (0);
	}

	if (svd->type == MAP_SHARED) {
		if (svd->vp) {
			memidp->val[0] = (uintptr_t)svd->vp;
			memidp->val[1] = (u_longlong_t)svd->offset +
			    (uintptr_t)(addr - seg->s_base);
			return (0);
		} else {

			SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
			if ((amp = svd->amp) != NULL) {
				anon_index = svd->anon_index +
				    seg_page(seg, addr);
			}
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

			ASSERT(amp != NULL);

			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL) {
				page_t *pp;

				pp = anon_zero(seg, addr, &ap, svd->cred);
				if (pp == NULL) {
					anon_array_exit(&cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
					return (ENOMEM);
				}
				ASSERT(anon_get_ptr(amp->ahp, anon_index)
				    == NULL);
				(void) anon_set_ptr(amp->ahp, anon_index,
				    ap, ANON_SLEEP);
				page_unlock(pp);
			}

			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);

			memidp->val[0] = (uintptr_t)ap;
			memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
			return (0);
		}
	}
	return (EINVAL);
}

static int
sameprot(struct seg *seg, caddr_t a, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vpage;
	spgcnt_t pages = btop(len);
	uint_t prot;

	if (svd->pageprot == 0)
		return (1);

	ASSERT(svd->vpage != NULL);

	vpage = &svd->vpage[seg_page(seg, a)];
	prot = VPP_PROT(vpage);
	vpage++;
	pages--;
	while (pages-- > 0) {
		if (prot != VPP_PROT(vpage))
			return (0);
		vpage++;
	}
	return (1);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segvn_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct segvn_data *svn_data;
	u_offset_t vn_off;
	vnode_t *vp;

	ASSERT(seg != NULL);

	svn_data = (struct segvn_data *)seg->s_data;
	if (svn_data == NULL)
		return (NULL);

	/*
	 * Get policy info for private or shared memory
	 */
	if (svn_data->type != MAP_SHARED) {
		if (svn_data->tr_state != SEGVN_TR_ON) {
			policy_info = &svn_data->policy_info;
		} else {
			policy_info = &svn_data->tr_policy_info;
			ASSERT(policy_info->mem_policy ==
			    LGRP_MEM_POLICY_NEXT_SEG);
		}
	} else {
		amp = svn_data->amp;
		anon_index = svn_data->anon_index + seg_page(seg, addr);
		vp = svn_data->vp;
		vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
		policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
	}

	return (policy_info);
}

/*ARGSUSED*/
static int
segvn_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

/*
 * Bind text vnode segment to an amp.
If we bind successfully mappings will be 9567 * established to per vnode mapping per lgroup amp pages instead of to vnode 9568 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9569 * may share the same text replication amp. If a suitable amp doesn't already 9570 * exist in svntr hash table create a new one. We may fail to bind to amp if 9571 * segment is not eligible for text replication. Code below first checks for 9572 * these conditions. If binding is successful segment tr_state is set to on 9573 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9574 * svd->amp remains as NULL. 9575 */ 9576 static void 9577 segvn_textrepl(struct seg *seg) 9578 { 9579 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9580 vnode_t *vp = svd->vp; 9581 u_offset_t off = svd->offset; 9582 size_t size = seg->s_size; 9583 u_offset_t eoff = off + size; 9584 uint_t szc = seg->s_szc; 9585 ulong_t hash = SVNTR_HASH_FUNC(vp); 9586 svntr_t *svntrp; 9587 struct vattr va; 9588 proc_t *p = seg->s_as->a_proc; 9589 lgrp_id_t lgrp_id; 9590 lgrp_id_t olid; 9591 int first; 9592 struct anon_map *amp; 9593 9594 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9595 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9596 ASSERT(p != NULL); 9597 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9598 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9599 ASSERT(svd->flags & MAP_TEXT); 9600 ASSERT(svd->type == MAP_PRIVATE); 9601 ASSERT(vp != NULL && svd->amp == NULL); 9602 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9603 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9604 ASSERT(seg->s_as != &kas); 9605 ASSERT(off < eoff); 9606 ASSERT(svntr_hashtab != NULL); 9607 9608 /* 9609 * If numa optimizations are no longer desired bail out. 9610 */ 9611 if (!lgrp_optimizations()) { 9612 svd->tr_state = SEGVN_TR_OFF; 9613 return; 9614 } 9615 9616 /* 9617 * Avoid creating anon maps with size bigger than the file size. 9618 * If VOP_GETATTR() call fails bail out. 9619 */ 9620 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9621 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9622 svd->tr_state = SEGVN_TR_OFF; 9623 SEGVN_TR_ADDSTAT(gaerr); 9624 return; 9625 } 9626 if (btopr(va.va_size) < btopr(eoff)) { 9627 svd->tr_state = SEGVN_TR_OFF; 9628 SEGVN_TR_ADDSTAT(overmap); 9629 return; 9630 } 9631 9632 /* 9633 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9634 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9635 * mapping that checks if trcache for this vnode needs to be 9636 * invalidated can't miss us. 9637 */ 9638 if (!(vp->v_flag & VVMEXEC)) { 9639 mutex_enter(&vp->v_lock); 9640 vp->v_flag |= VVMEXEC; 9641 mutex_exit(&vp->v_lock); 9642 } 9643 mutex_enter(&svntr_hashtab[hash].tr_lock); 9644 /* 9645 * Bail out if potentially MAP_SHARED writable mappings exist to this 9646 * vnode. We don't want to use old file contents from existing 9647 * replicas if this mapping was established after the original file 9648 * was changed. 
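	 *
	 * (The reverse direction is handled by segvn_inval_trcache() further
	 * below: when a MAP_SHARED writable mapping is later created to a
	 * vnode that has VVMEXEC set, it clears tr_valid on this vnode's
	 * svntr entries so that stale replicas are not reused.)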
9649 */ 9650 if (vn_is_mapped(vp, V_WRITE)) { 9651 mutex_exit(&svntr_hashtab[hash].tr_lock); 9652 svd->tr_state = SEGVN_TR_OFF; 9653 SEGVN_TR_ADDSTAT(wrcnt); 9654 return; 9655 } 9656 svntrp = svntr_hashtab[hash].tr_head; 9657 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9658 ASSERT(svntrp->tr_refcnt != 0); 9659 if (svntrp->tr_vp != vp) { 9660 continue; 9661 } 9662 9663 /* 9664 * Bail out if the file or its attributes were changed after 9665 * this replication entry was created since we need to use the 9666 * latest file contents. Note that mtime test alone is not 9667 * sufficient because a user can explicitly change mtime via 9668 * utimes(2) interfaces back to the old value after modifiying 9669 * the file contents. To detect this case we also have to test 9670 * ctime which among other things records the time of the last 9671 * mtime change by utimes(2). ctime is not changed when the file 9672 * is only read or executed so we expect that typically existing 9673 * replication amp's can be used most of the time. 9674 */ 9675 if (!svntrp->tr_valid || 9676 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9677 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9678 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9679 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9680 mutex_exit(&svntr_hashtab[hash].tr_lock); 9681 svd->tr_state = SEGVN_TR_OFF; 9682 SEGVN_TR_ADDSTAT(stale); 9683 return; 9684 } 9685 /* 9686 * if off, eoff and szc match current segment we found the 9687 * existing entry we can use. 9688 */ 9689 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9690 svntrp->tr_szc == szc) { 9691 break; 9692 } 9693 /* 9694 * Don't create different but overlapping in file offsets 9695 * entries to avoid replication of the same file pages more 9696 * than once per lgroup. 9697 */ 9698 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9699 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9700 mutex_exit(&svntr_hashtab[hash].tr_lock); 9701 svd->tr_state = SEGVN_TR_OFF; 9702 SEGVN_TR_ADDSTAT(overlap); 9703 return; 9704 } 9705 } 9706 /* 9707 * If we didn't find existing entry create a new one. 9708 */ 9709 if (svntrp == NULL) { 9710 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9711 if (svntrp == NULL) { 9712 mutex_exit(&svntr_hashtab[hash].tr_lock); 9713 svd->tr_state = SEGVN_TR_OFF; 9714 SEGVN_TR_ADDSTAT(nokmem); 9715 return; 9716 } 9717 #ifdef DEBUG 9718 { 9719 lgrp_id_t i; 9720 for (i = 0; i < NLGRPS_MAX; i++) { 9721 ASSERT(svntrp->tr_amp[i] == NULL); 9722 } 9723 } 9724 #endif /* DEBUG */ 9725 svntrp->tr_vp = vp; 9726 svntrp->tr_off = off; 9727 svntrp->tr_eoff = eoff; 9728 svntrp->tr_szc = szc; 9729 svntrp->tr_valid = 1; 9730 svntrp->tr_mtime = va.va_mtime; 9731 svntrp->tr_ctime = va.va_ctime; 9732 svntrp->tr_refcnt = 0; 9733 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9734 svntr_hashtab[hash].tr_head = svntrp; 9735 } 9736 first = 1; 9737 again: 9738 /* 9739 * We want to pick a replica with pages on main thread's (t_tid = 1, 9740 * aka T1) lgrp. Currently text replication is only optimized for 9741 * workloads that either have all threads of a process on the same 9742 * lgrp or execute their large text primarily on main thread. 9743 */ 9744 lgrp_id = p->p_t1_lgrpid; 9745 if (lgrp_id == LGRP_NONE) { 9746 /* 9747 * In case exec() prefaults text on non main thread use 9748 * current thread lgrpid. It will become main thread anyway 9749 * soon. 9750 */ 9751 lgrp_id = lgrp_home_id(curthread); 9752 } 9753 /* 9754 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. 
Otherwise 9755 * just set it to NLGRPS_MAX if it's different from current process T1 9756 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9757 * replication and T1 new home is different from lgrp used for text 9758 * replication. When this happens asyncronous segvn thread rechecks if 9759 * segments should change lgrps used for text replication. If we fail 9760 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX 9761 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id 9762 * we want to use. We don't need to use cas in this case because 9763 * another thread that races in between our non atomic check and set 9764 * may only change p_tr_lgrpid to NLGRPS_MAX at this point. 9765 */ 9766 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9767 olid = p->p_tr_lgrpid; 9768 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9769 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9770 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != 9771 olid) { 9772 olid = p->p_tr_lgrpid; 9773 ASSERT(olid != LGRP_NONE); 9774 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9775 p->p_tr_lgrpid = NLGRPS_MAX; 9776 } 9777 } 9778 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9779 membar_producer(); 9780 /* 9781 * lgrp_move_thread() won't schedule async recheck after 9782 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9783 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9784 * is not LGRP_NONE. 9785 */ 9786 if (first && p->p_t1_lgrpid != LGRP_NONE && 9787 p->p_t1_lgrpid != lgrp_id) { 9788 first = 0; 9789 goto again; 9790 } 9791 } 9792 /* 9793 * If no amp was created yet for lgrp_id create a new one as long as 9794 * we have enough memory to afford it. 9795 */ 9796 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9797 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9798 if (trmem > segvn_textrepl_max_bytes) { 9799 SEGVN_TR_ADDSTAT(normem); 9800 goto fail; 9801 } 9802 if (anon_try_resv_zone(size, NULL) == 0) { 9803 SEGVN_TR_ADDSTAT(noanon); 9804 goto fail; 9805 } 9806 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9807 if (amp == NULL) { 9808 anon_unresv_zone(size, NULL); 9809 SEGVN_TR_ADDSTAT(nokmem); 9810 goto fail; 9811 } 9812 ASSERT(amp->refcnt == 1); 9813 amp->a_szc = szc; 9814 svntrp->tr_amp[lgrp_id] = amp; 9815 SEGVN_TR_ADDSTAT(newamp); 9816 } 9817 svntrp->tr_refcnt++; 9818 ASSERT(svd->svn_trnext == NULL); 9819 ASSERT(svd->svn_trprev == NULL); 9820 svd->svn_trnext = svntrp->tr_svnhead; 9821 svd->svn_trprev = NULL; 9822 if (svntrp->tr_svnhead != NULL) { 9823 svntrp->tr_svnhead->svn_trprev = svd; 9824 } 9825 svntrp->tr_svnhead = svd; 9826 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9827 ASSERT(amp->refcnt >= 1); 9828 svd->amp = amp; 9829 svd->anon_index = 0; 9830 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9831 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9832 svd->tr_state = SEGVN_TR_ON; 9833 mutex_exit(&svntr_hashtab[hash].tr_lock); 9834 SEGVN_TR_ADDSTAT(repl); 9835 return; 9836 fail: 9837 ASSERT(segvn_textrepl_bytes >= size); 9838 atomic_add_long(&segvn_textrepl_bytes, -size); 9839 ASSERT(svntrp != NULL); 9840 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9841 if (svntrp->tr_refcnt == 0) { 9842 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9843 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9844 mutex_exit(&svntr_hashtab[hash].tr_lock); 9845 kmem_cache_free(svntr_cache, svntrp); 9846 } else { 9847 mutex_exit(&svntr_hashtab[hash].tr_lock); 9848 } 9849 svd->tr_state = SEGVN_TR_OFF; 9850 } 9851 9852 
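/*
 * Summary of the data structures used above and below (a restatement of the
 * code for readability, no new behavior): there is at most one svntr_t per
 * (vnode, offset, end offset, szc) text mapping, hashed by vnode; tr_refcnt
 * counts the segments currently bound to it, tr_svnhead links those segments'
 * segvn_data structures, and tr_amp[] holds at most one replication amp per
 * lgroup. segvn_textunrepl() below drops a segment's reference and, when the
 * last reference goes away, frees the whole entry including all per-lgroup
 * amps.
 */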
/* 9853 * Convert seg back to regular vnode mapping seg by unbinding it from its text 9854 * replication amp. This routine is most typically called when segment is 9855 * unmapped but can also be called when segment no longer qualifies for text 9856 * replication (e.g. due to protection changes). If unload_unmap is set use 9857 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of 9858 * svntr free all its anon maps and remove it from the hash table. 9859 */ 9860 static void 9861 segvn_textunrepl(struct seg *seg, int unload_unmap) 9862 { 9863 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9864 vnode_t *vp = svd->vp; 9865 u_offset_t off = svd->offset; 9866 size_t size = seg->s_size; 9867 u_offset_t eoff = off + size; 9868 uint_t szc = seg->s_szc; 9869 ulong_t hash = SVNTR_HASH_FUNC(vp); 9870 svntr_t *svntrp; 9871 svntr_t **prv_svntrp; 9872 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid; 9873 lgrp_id_t i; 9874 9875 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9876 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 9877 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9878 ASSERT(svd->tr_state == SEGVN_TR_ON); 9879 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9880 ASSERT(svd->amp != NULL); 9881 ASSERT(svd->amp->refcnt >= 1); 9882 ASSERT(svd->anon_index == 0); 9883 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9884 ASSERT(svntr_hashtab != NULL); 9885 9886 mutex_enter(&svntr_hashtab[hash].tr_lock); 9887 prv_svntrp = &svntr_hashtab[hash].tr_head; 9888 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) { 9889 ASSERT(svntrp->tr_refcnt != 0); 9890 if (svntrp->tr_vp == vp && svntrp->tr_off == off && 9891 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) { 9892 break; 9893 } 9894 } 9895 if (svntrp == NULL) { 9896 panic("segvn_textunrepl: svntr record not found"); 9897 } 9898 if (svntrp->tr_amp[lgrp_id] != svd->amp) { 9899 panic("segvn_textunrepl: amp mismatch"); 9900 } 9901 svd->tr_state = SEGVN_TR_OFF; 9902 svd->amp = NULL; 9903 if (svd->svn_trprev == NULL) { 9904 ASSERT(svntrp->tr_svnhead == svd); 9905 svntrp->tr_svnhead = svd->svn_trnext; 9906 if (svntrp->tr_svnhead != NULL) { 9907 svntrp->tr_svnhead->svn_trprev = NULL; 9908 } 9909 svd->svn_trnext = NULL; 9910 } else { 9911 svd->svn_trprev->svn_trnext = svd->svn_trnext; 9912 if (svd->svn_trnext != NULL) { 9913 svd->svn_trnext->svn_trprev = svd->svn_trprev; 9914 svd->svn_trnext = NULL; 9915 } 9916 svd->svn_trprev = NULL; 9917 } 9918 if (--svntrp->tr_refcnt) { 9919 mutex_exit(&svntr_hashtab[hash].tr_lock); 9920 goto done; 9921 } 9922 *prv_svntrp = svntrp->tr_next; 9923 mutex_exit(&svntr_hashtab[hash].tr_lock); 9924 for (i = 0; i < NLGRPS_MAX; i++) { 9925 struct anon_map *amp = svntrp->tr_amp[i]; 9926 if (amp == NULL) { 9927 continue; 9928 } 9929 ASSERT(amp->refcnt == 1); 9930 ASSERT(amp->swresv == size); 9931 ASSERT(amp->size == size); 9932 ASSERT(amp->a_szc == szc); 9933 if (amp->a_szc != 0) { 9934 anon_free_pages(amp->ahp, 0, size, szc); 9935 } else { 9936 anon_free(amp->ahp, 0, size); 9937 } 9938 svntrp->tr_amp[i] = NULL; 9939 ASSERT(segvn_textrepl_bytes >= size); 9940 atomic_add_long(&segvn_textrepl_bytes, -size); 9941 anon_unresv_zone(amp->swresv, NULL); 9942 amp->refcnt = 0; 9943 anonmap_free(amp); 9944 } 9945 kmem_cache_free(svntr_cache, svntrp); 9946 done: 9947 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size, 9948 unload_unmap ? 
HAT_UNLOAD_UNMAP : 0, NULL); 9949 } 9950 9951 /* 9952 * This is called when a MAP_SHARED writable mapping is created to a vnode 9953 * that is currently used for execution (VVMEXEC flag is set). In this case we 9954 * need to prevent further use of existing replicas. 9955 */ 9956 static void 9957 segvn_inval_trcache(vnode_t *vp) 9958 { 9959 ulong_t hash = SVNTR_HASH_FUNC(vp); 9960 svntr_t *svntrp; 9961 9962 ASSERT(vp->v_flag & VVMEXEC); 9963 9964 if (svntr_hashtab == NULL) { 9965 return; 9966 } 9967 9968 mutex_enter(&svntr_hashtab[hash].tr_lock); 9969 svntrp = svntr_hashtab[hash].tr_head; 9970 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9971 ASSERT(svntrp->tr_refcnt != 0); 9972 if (svntrp->tr_vp == vp && svntrp->tr_valid) { 9973 svntrp->tr_valid = 0; 9974 } 9975 } 9976 mutex_exit(&svntr_hashtab[hash].tr_lock); 9977 } 9978 9979 static void 9980 segvn_trasync_thread(void) 9981 { 9982 callb_cpr_t cpr_info; 9983 kmutex_t cpr_lock; /* just for CPR stuff */ 9984 9985 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL); 9986 9987 CALLB_CPR_INIT(&cpr_info, &cpr_lock, 9988 callb_generic_cpr, "segvn_async"); 9989 9990 if (segvn_update_textrepl_interval == 0) { 9991 segvn_update_textrepl_interval = segvn_update_tr_time * hz; 9992 } else { 9993 segvn_update_textrepl_interval *= hz; 9994 } 9995 (void) timeout(segvn_trupdate_wakeup, NULL, 9996 segvn_update_textrepl_interval); 9997 9998 for (;;) { 9999 mutex_enter(&cpr_lock); 10000 CALLB_CPR_SAFE_BEGIN(&cpr_info); 10001 mutex_exit(&cpr_lock); 10002 sema_p(&segvn_trasync_sem); 10003 mutex_enter(&cpr_lock); 10004 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 10005 mutex_exit(&cpr_lock); 10006 segvn_trupdate(); 10007 } 10008 } 10009 10010 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0; 10011 10012 static void 10013 segvn_trupdate_wakeup(void *dummy) 10014 { 10015 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations(); 10016 10017 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) { 10018 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs; 10019 sema_v(&segvn_trasync_sem); 10020 } 10021 10022 if (!segvn_disable_textrepl_update && 10023 segvn_update_textrepl_interval != 0) { 10024 (void) timeout(segvn_trupdate_wakeup, dummy, 10025 segvn_update_textrepl_interval); 10026 } 10027 } 10028 10029 static void 10030 segvn_trupdate(void) 10031 { 10032 ulong_t hash; 10033 svntr_t *svntrp; 10034 segvn_data_t *svd; 10035 10036 ASSERT(svntr_hashtab != NULL); 10037 10038 for (hash = 0; hash < svntr_hashtab_sz; hash++) { 10039 mutex_enter(&svntr_hashtab[hash].tr_lock); 10040 svntrp = svntr_hashtab[hash].tr_head; 10041 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 10042 ASSERT(svntrp->tr_refcnt != 0); 10043 svd = svntrp->tr_svnhead; 10044 for (; svd != NULL; svd = svd->svn_trnext) { 10045 segvn_trupdate_seg(svd->seg, svd, svntrp, 10046 hash); 10047 } 10048 } 10049 mutex_exit(&svntr_hashtab[hash].tr_lock); 10050 } 10051 } 10052 10053 static void 10054 segvn_trupdate_seg(struct seg *seg, 10055 segvn_data_t *svd, 10056 svntr_t *svntrp, 10057 ulong_t hash) 10058 { 10059 proc_t *p; 10060 lgrp_id_t lgrp_id; 10061 struct as *as; 10062 size_t size; 10063 struct anon_map *amp; 10064 10065 ASSERT(svd->vp != NULL); 10066 ASSERT(svd->vp == svntrp->tr_vp); 10067 ASSERT(svd->offset == svntrp->tr_off); 10068 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff); 10069 ASSERT(seg != NULL); 10070 ASSERT(svd->seg == seg); 10071 ASSERT(seg->s_data == (void *)svd); 10072 ASSERT(seg->s_szc == svntrp->tr_szc); 10073 ASSERT(svd->tr_state == SEGVN_TR_ON); 10074 
ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 10075 ASSERT(svd->amp != NULL); 10076 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 10077 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE); 10078 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX); 10079 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp); 10080 ASSERT(svntrp->tr_refcnt != 0); 10081 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock)); 10082 10083 as = seg->s_as; 10084 ASSERT(as != NULL && as != &kas); 10085 p = as->a_proc; 10086 ASSERT(p != NULL); 10087 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 10088 lgrp_id = p->p_t1_lgrpid; 10089 if (lgrp_id == LGRP_NONE) { 10090 return; 10091 } 10092 ASSERT(lgrp_id < NLGRPS_MAX); 10093 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) { 10094 return; 10095 } 10096 10097 /* 10098 * Use tryenter locking since we are taking the as/seg and svntr hash 10099 * locks in the reverse order from the synchronous thread. 10100 */ 10101 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) { 10102 SEGVN_TR_ADDSTAT(nolock); 10103 if (segvn_lgrp_trthr_migrs_snpsht) { 10104 segvn_lgrp_trthr_migrs_snpsht = 0; 10105 } 10106 return; 10107 } 10108 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) { 10109 AS_LOCK_EXIT(as, &as->a_lock); 10110 SEGVN_TR_ADDSTAT(nolock); 10111 if (segvn_lgrp_trthr_migrs_snpsht) { 10112 segvn_lgrp_trthr_migrs_snpsht = 0; 10113 } 10114 return; 10115 } 10116 size = seg->s_size; 10117 if (svntrp->tr_amp[lgrp_id] == NULL) { 10118 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 10119 if (trmem > segvn_textrepl_max_bytes) { 10120 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 10121 AS_LOCK_EXIT(as, &as->a_lock); 10122 atomic_add_long(&segvn_textrepl_bytes, -size); 10123 SEGVN_TR_ADDSTAT(normem); 10124 return; 10125 } 10126 if (anon_try_resv_zone(size, NULL) == 0) { 10127 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 10128 AS_LOCK_EXIT(as, &as->a_lock); 10129 atomic_add_long(&segvn_textrepl_bytes, -size); 10130 SEGVN_TR_ADDSTAT(noanon); 10131 return; 10132 } 10133 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 10134 if (amp == NULL) { 10135 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 10136 AS_LOCK_EXIT(as, &as->a_lock); 10137 atomic_add_long(&segvn_textrepl_bytes, -size); 10138 anon_unresv_zone(size, NULL); 10139 SEGVN_TR_ADDSTAT(nokmem); 10140 return; 10141 } 10142 ASSERT(amp->refcnt == 1); 10143 amp->a_szc = seg->s_szc; 10144 svntrp->tr_amp[lgrp_id] = amp; 10145 } 10146 /* 10147 * We don't need to drop the bucket lock but here we give other 10148 * threads a chance. svntr and svd can't be unlinked as long as the 10149 * segment lock is held as a writer and the AS lock is held as well. 10150 * After we retake the bucket lock we'll continue from where we left 10151 * off. We'll be able to reach the end of either list since new 10152 * entries are always added to the beginning of the lists.
10153 */ 10154 mutex_exit(&svntr_hashtab[hash].tr_lock); 10155 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL); 10156 mutex_enter(&svntr_hashtab[hash].tr_lock); 10157 10158 ASSERT(svd->tr_state == SEGVN_TR_ON); 10159 ASSERT(svd->amp != NULL); 10160 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 10161 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id); 10162 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]); 10163 10164 svd->tr_policy_info.mem_lgrpid = lgrp_id; 10165 svd->amp = svntrp->tr_amp[lgrp_id]; 10166 p->p_tr_lgrpid = NLGRPS_MAX; 10167 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 10168 AS_LOCK_EXIT(as, &as->a_lock); 10169 10170 ASSERT(svntrp->tr_refcnt != 0); 10171 ASSERT(svd->vp == svntrp->tr_vp); 10172 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id); 10173 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]); 10174 ASSERT(svd->seg == seg); 10175 ASSERT(svd->tr_state == SEGVN_TR_ON); 10176 10177 SEGVN_TR_ADDSTAT(asyncrepl); 10178 } 10179
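/*
 * Illustrative sketch (not from the original source): a minimal,
 * hypothetical helper restating the trylock back-off pattern used by
 * segvn_trupdate_seg() above.  The synchronous paths take the as/seg
 * locks before the svntr bucket lock, while the async update thread
 * already holds the bucket lock, so it must tryenter the as/seg locks
 * and give up on contention rather than block, or the two lock orders
 * could deadlock.  The helper name is invented for illustration and the
 * block is kept under #if 0 since it merely restates that pattern.
 */
#if 0
static int
segvn_tr_trylock_sketch(struct seg *seg, struct segvn_data *svd)
{
	struct as *as = seg->s_as;

	/* caller holds the svntr bucket lock */
	if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
		return (0);		/* back off; retry on a later pass */
	}
	if (!SEGVN_LOCK_TRYENTER(as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (0);		/* back off; retry on a later pass */
	}
	return (1);			/* caller must drop both locks */
}
#endif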