1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - shared or copy-on-write from a vnode/anonymous memory. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/param.h> 47 #include <sys/t_lock.h> 48 #include <sys/errno.h> 49 #include <sys/systm.h> 50 #include <sys/mman.h> 51 #include <sys/debug.h> 52 #include <sys/cred.h> 53 #include <sys/vmsystm.h> 54 #include <sys/tuneable.h> 55 #include <sys/bitmap.h> 56 #include <sys/swap.h> 57 #include <sys/kmem.h> 58 #include <sys/sysmacros.h> 59 #include <sys/vtrace.h> 60 #include <sys/cmn_err.h> 61 #include <sys/callb.h> 62 #include <sys/vm.h> 63 #include <sys/dumphdr.h> 64 #include <sys/lgrp.h> 65 66 #include <vm/hat.h> 67 #include <vm/as.h> 68 #include <vm/seg.h> 69 #include <vm/seg_vn.h> 70 #include <vm/pvn.h> 71 #include <vm/anon.h> 72 #include <vm/page.h> 73 #include <vm/vpage.h> 74 #include <sys/proc.h> 75 #include <sys/task.h> 76 #include <sys/project.h> 77 #include <sys/zone.h> 78 #include <sys/shm_impl.h> 79 /* 80 * Private seg op routines. 81 */ 82 static int segvn_dup(struct seg *seg, struct seg *newseg); 83 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 84 static void segvn_free(struct seg *seg); 85 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 86 caddr_t addr, size_t len, enum fault_type type, 87 enum seg_rw rw); 88 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 89 static int segvn_setprot(struct seg *seg, caddr_t addr, 90 size_t len, uint_t prot); 91 static int segvn_checkprot(struct seg *seg, caddr_t addr, 92 size_t len, uint_t prot); 93 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 94 static size_t segvn_swapout(struct seg *seg); 95 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 96 int attr, uint_t flags); 97 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 98 char *vec); 99 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 100 int attr, int op, ulong_t *lockmap, size_t pos); 101 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 102 uint_t *protv); 103 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 104 static int segvn_gettype(struct seg *seg, caddr_t addr); 105 static int segvn_getvp(struct seg *seg, caddr_t addr, 106 struct vnode **vpp); 107 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 108 uint_t behav); 109 static void segvn_dump(struct seg *seg); 110 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 111 struct page ***ppp, enum lock_type type, enum seg_rw rw); 112 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 113 uint_t szc); 114 static int segvn_getmemid(struct seg *seg, caddr_t addr, 115 memid_t *memidp); 116 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 117 static int segvn_capable(struct seg *seg, segcapability_t capable); 118 119 struct seg_ops segvn_ops = { 120 segvn_dup, 121 segvn_unmap, 122 segvn_free, 123 segvn_fault, 124 segvn_faulta, 125 segvn_setprot, 126 segvn_checkprot, 127 segvn_kluster, 128 segvn_swapout, 129 segvn_sync, 130 segvn_incore, 131 segvn_lockop, 132 segvn_getprot, 133 segvn_getoffset, 134 segvn_gettype, 135 segvn_getvp, 136 segvn_advise, 137 segvn_dump, 138 segvn_pagelock, 139 segvn_setpagesize, 140 segvn_getmemid, 141 segvn_getpolicy, 142 segvn_capable, 143 }; 144 145 /* 146 * Common zfod structures, provided as a shorthand for others to use. 147 */ 148 static segvn_crargs_t zfod_segvn_crargs = 149 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 150 static segvn_crargs_t kzfod_segvn_crargs = 151 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 152 PROT_ALL & ~PROT_USER); 153 static segvn_crargs_t stack_noexec_crargs = 154 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 155 156 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 157 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 158 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 159 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 160 161 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 162 163 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 164 165 static int segvn_concat(struct seg *, struct seg *, int); 166 static int segvn_extend_prev(struct seg *, struct seg *, 167 struct segvn_crargs *, size_t); 168 static int segvn_extend_next(struct seg *, struct seg *, 169 struct segvn_crargs *, size_t); 170 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 171 static void segvn_pagelist_rele(page_t **); 172 static void segvn_setvnode_mpss(vnode_t *); 173 static void segvn_relocate_pages(page_t **, page_t *); 174 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 175 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 176 uint_t, page_t **, page_t **, uint_t *, int *); 177 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 178 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 179 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 180 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 181 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 182 u_offset_t, struct vpage *, page_t **, uint_t, 183 enum fault_type, enum seg_rw, int, int); 184 static void segvn_vpage(struct seg *); 185 static size_t segvn_count_swap_by_vpages(struct seg *); 186 187 static void segvn_purge(struct seg *seg); 188 static int segvn_reclaim(struct seg *, caddr_t, size_t, struct page **, 189 enum seg_rw); 190 191 static int sameprot(struct seg *, caddr_t, size_t); 192 193 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 194 static int segvn_clrszc(struct seg *); 195 static struct seg *segvn_split_seg(struct seg *, caddr_t); 196 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 197 ulong_t, uint_t); 198 199 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 200 size_t, void *, u_offset_t); 201 202 static int segvn_slock_anonpages(page_t *, int); 203 static void segvn_sunlock_anonpages(page_t *, int); 204 205 static struct kmem_cache *segvn_cache; 206 static struct kmem_cache **segvn_szc_cache; 207 208 #ifdef VM_STATS 209 static struct segvnvmstats_str { 210 ulong_t fill_vp_pages[31]; 211 ulong_t fltvnpages[49]; 212 ulong_t fullszcpages[10]; 213 ulong_t relocatepages[3]; 214 ulong_t fltanpages[17]; 215 ulong_t pagelock[3]; 216 ulong_t demoterange[3]; 217 } segvnvmstats; 218 #endif /* VM_STATS */ 219 220 #define SDR_RANGE 1 /* demote entire range */ 221 #define SDR_END 2 /* demote non aligned ends only */ 222 223 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 224 if ((len) != 0) { \ 225 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 226 ASSERT(lpgaddr >= (seg)->s_base); \ 227 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 228 (len)), pgsz); \ 229 ASSERT(lpgeaddr > lpgaddr); \ 230 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 231 } else { \ 232 lpgeaddr = lpgaddr = (addr); \ 233 } \ 234 } 235 236 /*ARGSUSED*/ 237 static int 238 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 239 { 240 struct segvn_data *svd = buf; 241 242 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 243 mutex_init(&svd->segp_slock, NULL, MUTEX_DEFAULT, NULL); 244 svd->svn_trnext = svd->svn_trprev = NULL; 245 return (0); 246 } 247 248 /*ARGSUSED1*/ 249 static void 250 segvn_cache_destructor(void *buf, void *cdrarg) 251 { 252 struct segvn_data *svd = buf; 253 254 rw_destroy(&svd->lock); 255 mutex_destroy(&svd->segp_slock); 256 } 257 258 /*ARGSUSED*/ 259 static int 260 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 261 { 262 bzero(buf, sizeof (svntr_t)); 263 return (0); 264 } 265 266 /* 267 * Patching this variable to non-zero allows the system to run with 268 * stacks marked as "not executable". It's a bit of a kludge, but is 269 * provided as a tweakable for platforms that export those ABIs 270 * (e.g. sparc V8) that have executable stacks enabled by default. 271 * There are also some restrictions for platforms that don't actually 272 * implement 'noexec' protections. 273 * 274 * Once enabled, the system is (therefore) unable to provide a fully 275 * ABI-compliant execution environment, though practically speaking, 276 * most everything works. The exceptions are generally some interpreters 277 * and debuggers that create executable code on the stack and jump 278 * into it (without explicitly mprotecting the address range to include 279 * PROT_EXEC). 280 * 281 * One important class of applications that are disabled are those 282 * that have been transformed into malicious agents using one of the 283 * numerous "buffer overflow" attacks. See 4007890. 284 */ 285 int noexec_user_stack = 0; 286 int noexec_user_stack_log = 1; 287 288 int segvn_lpg_disable = 0; 289 uint_t segvn_maxpgszc = 0; 290 291 ulong_t segvn_vmpss_clrszc_cnt; 292 ulong_t segvn_vmpss_clrszc_err; 293 ulong_t segvn_fltvnpages_clrszc_cnt; 294 ulong_t segvn_fltvnpages_clrszc_err; 295 ulong_t segvn_setpgsz_align_err; 296 ulong_t segvn_setpgsz_anon_align_err; 297 ulong_t segvn_setpgsz_getattr_err; 298 ulong_t segvn_setpgsz_eof_err; 299 ulong_t segvn_faultvnmpss_align_err1; 300 ulong_t segvn_faultvnmpss_align_err2; 301 ulong_t segvn_faultvnmpss_align_err3; 302 ulong_t segvn_faultvnmpss_align_err4; 303 ulong_t segvn_faultvnmpss_align_err5; 304 ulong_t segvn_vmpss_pageio_deadlk_err; 305 306 int segvn_use_regions = 1; 307 308 /* 309 * Segvn supports text replication optimization for NUMA platforms. Text 310 * replica's are represented by anon maps (amp). There's one amp per text file 311 * region per lgroup. A process chooses the amp for each of its text mappings 312 * based on the lgroup assignment of its main thread (t_tid = 1). All 313 * processes that want a replica on a particular lgroup for the same text file 314 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 315 * with vp,off,size,szc used as a key. Text replication segments are read only 316 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 317 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 318 * pages. Replication amp is assigned to a segment when it gets its first 319 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread 320 * rechecks periodically if the process still maps an amp local to the main 321 * thread. If not async thread forces process to remap to an amp in the new 322 * home lgroup of the main thread. Current text replication implementation 323 * only provides the benefit to workloads that do most of their work in the 324 * main thread of a process or all the threads of a process run in the same 325 * lgroup. To extend text replication benefit to different types of 326 * multithreaded workloads further work would be needed in the hat layer to 327 * allow the same virtual address in the same hat to simultaneously map 328 * different physical addresses (i.e. page table replication would be needed 329 * for x86). 330 * 331 * amp pages are used instead of vnode pages as long as segment has a very 332 * simple life cycle. It's created via segvn_create(), handles S_EXEC 333 * (S_READ) pagefaults and is fully unmapped. If anything more complicated 334 * happens such as protection is changed, real COW fault happens, pagesize is 335 * changed, MC_LOCK is requested or segment is partially unmapped we turn off 336 * text replication by converting the segment back to vnode only segment 337 * (unmap segment's address range and set svd->amp to NULL). 338 * 339 * The original file can be changed after amp is inserted into 340 * svntr_hashtab. Processes that are launched after the file is already 341 * changed can't use the replica's created prior to the file change. To 342 * implement this functionality hash entries are timestamped. Replica's can 343 * only be used if current file modification time is the same as the timestamp 344 * saved when hash entry was created. However just timestamps alone are not 345 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We 346 * deal with file changes via MAP_SHARED mappings differently. When writable 347 * MAP_SHARED mappings are created to vnodes marked as executable we mark all 348 * existing replica's for this vnode as not usable for future text 349 * mappings. And we don't create new replica's for files that currently have 350 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is 351 * true). 352 */ 353 354 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 355 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 356 357 static ulong_t svntr_hashtab_sz = 512; 358 static svntr_bucket_t *svntr_hashtab = NULL; 359 static struct kmem_cache *svntr_cache; 360 static svntr_stats_t *segvn_textrepl_stats; 361 static ksema_t segvn_trasync_sem; 362 363 int segvn_disable_textrepl = 1; 364 size_t textrepl_size_thresh = (size_t)-1; 365 size_t segvn_textrepl_bytes = 0; 366 size_t segvn_textrepl_max_bytes = 0; 367 clock_t segvn_update_textrepl_interval = 0; 368 int segvn_update_tr_time = 10; 369 int segvn_disable_textrepl_update = 0; 370 371 static void segvn_textrepl(struct seg *); 372 static void segvn_textunrepl(struct seg *, int); 373 static void segvn_inval_trcache(vnode_t *); 374 static void segvn_trasync_thread(void); 375 static void segvn_trupdate_wakeup(void *); 376 static void segvn_trupdate(void); 377 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 378 ulong_t); 379 380 /* 381 * Initialize segvn data structures 382 */ 383 void 384 segvn_init(void) 385 { 386 uint_t maxszc; 387 uint_t szc; 388 size_t pgsz; 389 390 segvn_cache = kmem_cache_create("segvn_cache", 391 sizeof (struct segvn_data), 0, 392 segvn_cache_constructor, segvn_cache_destructor, NULL, 393 NULL, NULL, 0); 394 395 if (segvn_lpg_disable == 0) { 396 szc = maxszc = page_num_pagesizes() - 1; 397 if (szc == 0) { 398 segvn_lpg_disable = 1; 399 } 400 if (page_get_pagesize(0) != PAGESIZE) { 401 panic("segvn_init: bad szc 0"); 402 /*NOTREACHED*/ 403 } 404 while (szc != 0) { 405 pgsz = page_get_pagesize(szc); 406 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 407 panic("segvn_init: bad szc %d", szc); 408 /*NOTREACHED*/ 409 } 410 szc--; 411 } 412 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 413 segvn_maxpgszc = maxszc; 414 } 415 416 if (segvn_maxpgszc) { 417 segvn_szc_cache = (struct kmem_cache **)kmem_alloc( 418 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *), 419 KM_SLEEP); 420 } 421 422 for (szc = 1; szc <= segvn_maxpgszc; szc++) { 423 char str[32]; 424 425 (void) sprintf(str, "segvn_szc_cache%d", szc); 426 segvn_szc_cache[szc] = kmem_cache_create(str, 427 page_get_pagecnt(szc) * sizeof (page_t *), 0, 428 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 429 } 430 431 432 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 433 segvn_use_regions = 0; 434 435 /* 436 * For now shared regions and text replication segvn support 437 * are mutually exclusive. This is acceptable because 438 * currently significant benefit from text replication was 439 * only observed on AMD64 NUMA platforms (due to relatively 440 * small L2$ size) and currently we don't support shared 441 * regions on x86. 442 */ 443 if (segvn_use_regions && !segvn_disable_textrepl) { 444 segvn_disable_textrepl = 1; 445 } 446 447 #if defined(_LP64) 448 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 449 !segvn_disable_textrepl) { 450 ulong_t i; 451 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 452 453 svntr_cache = kmem_cache_create("svntr_cache", 454 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 455 NULL, NULL, NULL, 0); 456 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 457 for (i = 0; i < svntr_hashtab_sz; i++) { 458 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 459 MUTEX_DEFAULT, NULL); 460 } 461 segvn_textrepl_max_bytes = ptob(physmem) / 462 segvn_textrepl_max_bytes_factor; 463 segvn_textrepl_stats = kmem_zalloc(NCPU * 464 sizeof (svntr_stats_t), KM_SLEEP); 465 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 466 (void) thread_create(NULL, 0, segvn_trasync_thread, 467 NULL, 0, &p0, TS_RUN, minclsyspri); 468 } 469 #endif 470 } 471 472 #define SEGVN_PAGEIO ((void *)0x1) 473 #define SEGVN_NOPAGEIO ((void *)0x2) 474 475 static void 476 segvn_setvnode_mpss(vnode_t *vp) 477 { 478 int err; 479 480 ASSERT(vp->v_mpssdata == NULL || 481 vp->v_mpssdata == SEGVN_PAGEIO || 482 vp->v_mpssdata == SEGVN_NOPAGEIO); 483 484 if (vp->v_mpssdata == NULL) { 485 if (vn_vmpss_usepageio(vp)) { 486 err = VOP_PAGEIO(vp, (page_t *)NULL, 487 (u_offset_t)0, 0, 0, CRED(), NULL); 488 } else { 489 err = ENOSYS; 490 } 491 /* 492 * set v_mpssdata just once per vnode life 493 * so that it never changes. 494 */ 495 mutex_enter(&vp->v_lock); 496 if (vp->v_mpssdata == NULL) { 497 if (err == EINVAL) { 498 vp->v_mpssdata = SEGVN_PAGEIO; 499 } else { 500 vp->v_mpssdata = SEGVN_NOPAGEIO; 501 } 502 } 503 mutex_exit(&vp->v_lock); 504 } 505 } 506 507 int 508 segvn_create(struct seg *seg, void *argsp) 509 { 510 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 511 struct segvn_data *svd; 512 size_t swresv = 0; 513 struct cred *cred; 514 struct anon_map *amp; 515 int error = 0; 516 size_t pgsz; 517 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 518 int use_rgn = 0; 519 int trok = 0; 520 521 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 522 523 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 524 panic("segvn_create type"); 525 /*NOTREACHED*/ 526 } 527 528 /* 529 * Check arguments. If a shared anon structure is given then 530 * it is illegal to also specify a vp. 531 */ 532 if (a->amp != NULL && a->vp != NULL) { 533 panic("segvn_create anon_map"); 534 /*NOTREACHED*/ 535 } 536 537 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 538 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 539 segvn_use_regions) { 540 use_rgn = 1; 541 } 542 543 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */ 544 if (a->type == MAP_SHARED) 545 a->flags &= ~MAP_NORESERVE; 546 547 if (a->szc != 0) { 548 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 549 (a->amp != NULL && a->type == MAP_PRIVATE) || 550 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 551 a->szc = 0; 552 } else { 553 if (a->szc > segvn_maxpgszc) 554 a->szc = segvn_maxpgszc; 555 pgsz = page_get_pagesize(a->szc); 556 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 557 !IS_P2ALIGNED(seg->s_size, pgsz)) { 558 a->szc = 0; 559 } else if (a->vp != NULL) { 560 extern struct vnode kvp; 561 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 562 /* 563 * paranoid check. 564 * hat_page_demote() is not supported 565 * on swapfs pages. 566 */ 567 a->szc = 0; 568 } else if (map_addr_vacalign_check(seg->s_base, 569 a->offset & PAGEMASK)) { 570 a->szc = 0; 571 } 572 } else if (a->amp != NULL) { 573 pgcnt_t anum = btopr(a->offset); 574 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 575 if (!IS_P2ALIGNED(anum, pgcnt)) { 576 a->szc = 0; 577 } 578 } 579 } 580 } 581 582 /* 583 * If segment may need private pages, reserve them now. 584 */ 585 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 586 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 587 if (anon_resv(seg->s_size) == 0) 588 return (EAGAIN); 589 swresv = seg->s_size; 590 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 591 seg, swresv, 1); 592 } 593 594 /* 595 * Reserve any mapping structures that may be required. 596 * 597 * Don't do it for segments that may use regions. It's currently a 598 * noop in the hat implementations anyway. 599 */ 600 if (!use_rgn) { 601 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 602 } 603 604 if (a->cred) { 605 cred = a->cred; 606 crhold(cred); 607 } else { 608 crhold(cred = CRED()); 609 } 610 611 /* Inform the vnode of the new mapping */ 612 if (a->vp != NULL) { 613 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 614 seg->s_as, seg->s_base, seg->s_size, a->prot, 615 a->maxprot, a->type, cred, NULL); 616 if (error) { 617 if (swresv != 0) { 618 anon_unresv(swresv); 619 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 620 "anon proc:%p %lu %u", seg, swresv, 0); 621 } 622 crfree(cred); 623 if (!use_rgn) { 624 hat_unload(seg->s_as->a_hat, seg->s_base, 625 seg->s_size, HAT_UNLOAD_UNMAP); 626 } 627 return (error); 628 } 629 /* 630 * svntr_hashtab will be NULL if we support shared regions. 631 */ 632 trok = ((a->flags & MAP_TEXT) && 633 (seg->s_size > textrepl_size_thresh || 634 (a->flags & _MAP_TEXTREPL)) && 635 lgrp_optimizations() && svntr_hashtab != NULL && 636 a->type == MAP_PRIVATE && swresv == 0 && 637 !(a->flags & MAP_NORESERVE) && 638 seg->s_as != &kas && a->vp->v_type == VREG); 639 640 ASSERT(!trok || !use_rgn); 641 } 642 643 /* 644 * If more than one segment in the address space, and they're adjacent 645 * virtually, try to concatenate them. Don't concatenate if an 646 * explicit anon_map structure was supplied (e.g., SystemV shared 647 * memory) or if we'll use text replication for this segment. 648 */ 649 if (a->amp == NULL && !use_rgn && !trok) { 650 struct seg *pseg, *nseg; 651 struct segvn_data *psvd, *nsvd; 652 lgrp_mem_policy_t ppolicy, npolicy; 653 uint_t lgrp_mem_policy_flags = 0; 654 extern lgrp_mem_policy_t lgrp_mem_default_policy; 655 656 /* 657 * Memory policy flags (lgrp_mem_policy_flags) is valid when 658 * extending stack/heap segments. 659 */ 660 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 661 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 662 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 663 } else { 664 /* 665 * Get policy when not extending it from another segment 666 */ 667 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 668 } 669 670 /* 671 * First, try to concatenate the previous and new segments 672 */ 673 pseg = AS_SEGPREV(seg->s_as, seg); 674 if (pseg != NULL && 675 pseg->s_base + pseg->s_size == seg->s_base && 676 pseg->s_ops == &segvn_ops) { 677 /* 678 * Get memory allocation policy from previous segment. 679 * When extension is specified (e.g. for heap) apply 680 * this policy to the new segment regardless of the 681 * outcome of segment concatenation. Extension occurs 682 * for non-default policy otherwise default policy is 683 * used and is based on extended segment size. 684 */ 685 psvd = (struct segvn_data *)pseg->s_data; 686 ppolicy = psvd->policy_info.mem_policy; 687 if (lgrp_mem_policy_flags == 688 LGRP_MP_FLAG_EXTEND_UP) { 689 if (ppolicy != lgrp_mem_default_policy) { 690 mpolicy = ppolicy; 691 } else { 692 mpolicy = lgrp_mem_policy_default( 693 pseg->s_size + seg->s_size, 694 a->type); 695 } 696 } 697 698 if (mpolicy == ppolicy && 699 (pseg->s_size + seg->s_size <= 700 segvn_comb_thrshld || psvd->amp == NULL) && 701 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 702 /* 703 * success! now try to concatenate 704 * with following seg 705 */ 706 crfree(cred); 707 nseg = AS_SEGNEXT(pseg->s_as, pseg); 708 if (nseg != NULL && 709 nseg != pseg && 710 nseg->s_ops == &segvn_ops && 711 pseg->s_base + pseg->s_size == 712 nseg->s_base) 713 (void) segvn_concat(pseg, nseg, 0); 714 ASSERT(pseg->s_szc == 0 || 715 (a->szc == pseg->s_szc && 716 IS_P2ALIGNED(pseg->s_base, pgsz) && 717 IS_P2ALIGNED(pseg->s_size, pgsz))); 718 return (0); 719 } 720 } 721 722 /* 723 * Failed, so try to concatenate with following seg 724 */ 725 nseg = AS_SEGNEXT(seg->s_as, seg); 726 if (nseg != NULL && 727 seg->s_base + seg->s_size == nseg->s_base && 728 nseg->s_ops == &segvn_ops) { 729 /* 730 * Get memory allocation policy from next segment. 731 * When extension is specified (e.g. for stack) apply 732 * this policy to the new segment regardless of the 733 * outcome of segment concatenation. Extension occurs 734 * for non-default policy otherwise default policy is 735 * used and is based on extended segment size. 736 */ 737 nsvd = (struct segvn_data *)nseg->s_data; 738 npolicy = nsvd->policy_info.mem_policy; 739 if (lgrp_mem_policy_flags == 740 LGRP_MP_FLAG_EXTEND_DOWN) { 741 if (npolicy != lgrp_mem_default_policy) { 742 mpolicy = npolicy; 743 } else { 744 mpolicy = lgrp_mem_policy_default( 745 nseg->s_size + seg->s_size, 746 a->type); 747 } 748 } 749 750 if (mpolicy == npolicy && 751 segvn_extend_next(seg, nseg, a, swresv) == 0) { 752 crfree(cred); 753 ASSERT(nseg->s_szc == 0 || 754 (a->szc == nseg->s_szc && 755 IS_P2ALIGNED(nseg->s_base, pgsz) && 756 IS_P2ALIGNED(nseg->s_size, pgsz))); 757 return (0); 758 } 759 } 760 } 761 762 if (a->vp != NULL) { 763 VN_HOLD(a->vp); 764 if (a->type == MAP_SHARED) 765 lgrp_shm_policy_init(NULL, a->vp); 766 } 767 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 768 769 seg->s_ops = &segvn_ops; 770 seg->s_data = (void *)svd; 771 seg->s_szc = a->szc; 772 773 svd->seg = seg; 774 svd->vp = a->vp; 775 /* 776 * Anonymous mappings have no backing file so the offset is meaningless. 777 */ 778 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0; 779 svd->prot = a->prot; 780 svd->maxprot = a->maxprot; 781 svd->pageprot = 0; 782 svd->type = a->type; 783 svd->vpage = NULL; 784 svd->cred = cred; 785 svd->advice = MADV_NORMAL; 786 svd->pageadvice = 0; 787 svd->flags = (ushort_t)a->flags; 788 svd->softlockcnt = 0; 789 svd->rcookie = HAT_INVALID_REGION_COOKIE; 790 svd->pageswap = 0; 791 792 if (a->szc != 0 && a->vp != NULL) { 793 segvn_setvnode_mpss(a->vp); 794 } 795 if (svd->type == MAP_SHARED && svd->vp != NULL && 796 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 797 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 798 segvn_inval_trcache(svd->vp); 799 } 800 801 amp = a->amp; 802 if ((svd->amp = amp) == NULL) { 803 svd->anon_index = 0; 804 if (svd->type == MAP_SHARED) { 805 svd->swresv = 0; 806 /* 807 * Shared mappings to a vp need no other setup. 808 * If we have a shared mapping to an anon_map object 809 * which hasn't been allocated yet, allocate the 810 * struct now so that it will be properly shared 811 * by remembering the swap reservation there. 812 */ 813 if (a->vp == NULL) { 814 svd->amp = anonmap_alloc(seg->s_size, swresv, 815 ANON_SLEEP); 816 svd->amp->a_szc = seg->s_szc; 817 } 818 } else { 819 /* 820 * Private mapping (with or without a vp). 821 * Allocate anon_map when needed. 822 */ 823 svd->swresv = swresv; 824 } 825 } else { 826 pgcnt_t anon_num; 827 828 /* 829 * Mapping to an existing anon_map structure without a vp. 830 * For now we will insure that the segment size isn't larger 831 * than the size - offset gives us. Later on we may wish to 832 * have the anon array dynamically allocated itself so that 833 * we don't always have to allocate all the anon pointer slots. 834 * This of course involves adding extra code to check that we 835 * aren't trying to use an anon pointer slot beyond the end 836 * of the currently allocated anon array. 837 */ 838 if ((amp->size - a->offset) < seg->s_size) { 839 panic("segvn_create anon_map size"); 840 /*NOTREACHED*/ 841 } 842 843 anon_num = btopr(a->offset); 844 845 if (a->type == MAP_SHARED) { 846 /* 847 * SHARED mapping to a given anon_map. 848 */ 849 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 850 amp->refcnt++; 851 if (a->szc > amp->a_szc) { 852 amp->a_szc = a->szc; 853 } 854 ANON_LOCK_EXIT(&->a_rwlock); 855 svd->anon_index = anon_num; 856 svd->swresv = 0; 857 } else { 858 /* 859 * PRIVATE mapping to a given anon_map. 860 * Make sure that all the needed anon 861 * structures are created (so that we will 862 * share the underlying pages if nothing 863 * is written by this mapping) and then 864 * duplicate the anon array as is done 865 * when a privately mapped segment is dup'ed. 866 */ 867 struct anon *ap; 868 caddr_t addr; 869 caddr_t eaddr; 870 ulong_t anon_idx; 871 int hat_flag = HAT_LOAD; 872 873 if (svd->flags & MAP_TEXT) { 874 hat_flag |= HAT_LOAD_TEXT; 875 } 876 877 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 878 svd->amp->a_szc = seg->s_szc; 879 svd->anon_index = 0; 880 svd->swresv = swresv; 881 882 /* 883 * Prevent 2 threads from allocating anon 884 * slots simultaneously. 885 */ 886 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 887 eaddr = seg->s_base + seg->s_size; 888 889 for (anon_idx = anon_num, addr = seg->s_base; 890 addr < eaddr; addr += PAGESIZE, anon_idx++) { 891 page_t *pp; 892 893 if ((ap = anon_get_ptr(amp->ahp, 894 anon_idx)) != NULL) 895 continue; 896 897 /* 898 * Allocate the anon struct now. 899 * Might as well load up translation 900 * to the page while we're at it... 901 */ 902 pp = anon_zero(seg, addr, &ap, cred); 903 if (ap == NULL || pp == NULL) { 904 panic("segvn_create anon_zero"); 905 /*NOTREACHED*/ 906 } 907 908 /* 909 * Re-acquire the anon_map lock and 910 * initialize the anon array entry. 911 */ 912 ASSERT(anon_get_ptr(amp->ahp, 913 anon_idx) == NULL); 914 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 915 ANON_SLEEP); 916 917 ASSERT(seg->s_szc == 0); 918 ASSERT(!IS_VMODSORT(pp->p_vnode)); 919 920 ASSERT(use_rgn == 0); 921 hat_memload(seg->s_as->a_hat, addr, pp, 922 svd->prot & ~PROT_WRITE, hat_flag); 923 924 page_unlock(pp); 925 } 926 ASSERT(seg->s_szc == 0); 927 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 928 0, seg->s_size); 929 ANON_LOCK_EXIT(&->a_rwlock); 930 } 931 } 932 933 /* 934 * Set default memory allocation policy for segment 935 * 936 * Always set policy for private memory at least for initialization 937 * even if this is a shared memory segment 938 */ 939 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 940 941 if (svd->type == MAP_SHARED) 942 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 943 svd->vp, svd->offset, seg->s_size); 944 945 if (use_rgn) { 946 ASSERT(!trok); 947 ASSERT(svd->amp == NULL); 948 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 949 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 950 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 951 HAT_REGION_TEXT); 952 } 953 954 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 955 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 956 957 return (0); 958 } 959 960 /* 961 * Concatenate two existing segments, if possible. 962 * Return 0 on success, -1 if two segments are not compatible 963 * or -2 on memory allocation failure. 964 * If amp_cat == 1 then try and concat segments with anon maps 965 */ 966 static int 967 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 968 { 969 struct segvn_data *svd1 = seg1->s_data; 970 struct segvn_data *svd2 = seg2->s_data; 971 struct anon_map *amp1 = svd1->amp; 972 struct anon_map *amp2 = svd2->amp; 973 struct vpage *vpage1 = svd1->vpage; 974 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 975 size_t size, nvpsize; 976 pgcnt_t npages1, npages2; 977 978 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 979 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 980 ASSERT(seg1->s_ops == seg2->s_ops); 981 982 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 983 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 984 return (-1); 985 } 986 987 /* both segments exist, try to merge them */ 988 #define incompat(x) (svd1->x != svd2->x) 989 if (incompat(vp) || incompat(maxprot) || 990 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 991 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 992 incompat(type) || incompat(cred) || incompat(flags) || 993 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 994 (svd2->softlockcnt > 0)) 995 return (-1); 996 #undef incompat 997 998 /* 999 * vp == NULL implies zfod, offset doesn't matter 1000 */ 1001 if (svd1->vp != NULL && 1002 svd1->offset + seg1->s_size != svd2->offset) { 1003 return (-1); 1004 } 1005 1006 /* 1007 * Don't concatenate if either segment uses text replication. 1008 */ 1009 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 1010 return (-1); 1011 } 1012 1013 /* 1014 * Fail early if we're not supposed to concatenate 1015 * segments with non NULL amp. 1016 */ 1017 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) { 1018 return (-1); 1019 } 1020 1021 if (svd1->vp == NULL && svd1->type == MAP_SHARED) { 1022 if (amp1 != amp2) { 1023 return (-1); 1024 } 1025 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) != 1026 svd2->anon_index) { 1027 return (-1); 1028 } 1029 ASSERT(amp1 == NULL || amp1->refcnt >= 2); 1030 } 1031 1032 /* 1033 * If either seg has vpages, create a new merged vpage array. 1034 */ 1035 if (vpage1 != NULL || vpage2 != NULL) { 1036 struct vpage *vp, *evp; 1037 1038 npages1 = seg_pages(seg1); 1039 npages2 = seg_pages(seg2); 1040 nvpsize = vpgtob(npages1 + npages2); 1041 1042 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) { 1043 return (-2); 1044 } 1045 1046 if (vpage1 != NULL) { 1047 bcopy(vpage1, nvpage, vpgtob(npages1)); 1048 } else { 1049 evp = nvpage + npages1; 1050 for (vp = nvpage; vp < evp; vp++) { 1051 VPP_SETPROT(vp, svd1->prot); 1052 VPP_SETADVICE(vp, svd1->advice); 1053 } 1054 } 1055 1056 if (vpage2 != NULL) { 1057 bcopy(vpage2, nvpage + npages1, vpgtob(npages2)); 1058 } else { 1059 evp = nvpage + npages1 + npages2; 1060 for (vp = nvpage + npages1; vp < evp; vp++) { 1061 VPP_SETPROT(vp, svd2->prot); 1062 VPP_SETADVICE(vp, svd2->advice); 1063 } 1064 } 1065 1066 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) { 1067 ASSERT(svd1->swresv == seg1->s_size); 1068 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1069 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1070 evp = nvpage + npages1; 1071 for (vp = nvpage; vp < evp; vp++) { 1072 VPP_SETSWAPRES(vp); 1073 } 1074 } 1075 1076 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) { 1077 ASSERT(svd2->swresv == seg2->s_size); 1078 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1079 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1080 vp = nvpage + npages1; 1081 evp = vp + npages2; 1082 for (; vp < evp; vp++) { 1083 VPP_SETSWAPRES(vp); 1084 } 1085 } 1086 } 1087 ASSERT((vpage1 != NULL || vpage2 != NULL) || 1088 (svd1->pageswap == 0 && svd2->pageswap == 0)); 1089 1090 /* 1091 * If either segment has private pages, create a new merged anon 1092 * array. If mergeing shared anon segments just decrement anon map's 1093 * refcnt. 1094 */ 1095 if (amp1 != NULL && svd1->type == MAP_SHARED) { 1096 ASSERT(amp1 == amp2 && svd1->vp == NULL); 1097 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1098 ASSERT(amp1->refcnt >= 2); 1099 amp1->refcnt--; 1100 ANON_LOCK_EXIT(&1->a_rwlock); 1101 svd2->amp = NULL; 1102 } else if (amp1 != NULL || amp2 != NULL) { 1103 struct anon_hdr *nahp; 1104 struct anon_map *namp = NULL; 1105 size_t asize; 1106 1107 ASSERT(svd1->type == MAP_PRIVATE); 1108 1109 asize = seg1->s_size + seg2->s_size; 1110 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) { 1111 if (nvpage != NULL) { 1112 kmem_free(nvpage, nvpsize); 1113 } 1114 return (-2); 1115 } 1116 if (amp1 != NULL) { 1117 /* 1118 * XXX anon rwlock is not really needed because 1119 * this is a private segment and we are writers. 1120 */ 1121 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1122 ASSERT(amp1->refcnt == 1); 1123 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1124 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1125 anon_release(nahp, btop(asize)); 1126 ANON_LOCK_EXIT(&1->a_rwlock); 1127 if (nvpage != NULL) { 1128 kmem_free(nvpage, nvpsize); 1129 } 1130 return (-2); 1131 } 1132 } 1133 if (amp2 != NULL) { 1134 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1135 ASSERT(amp2->refcnt == 1); 1136 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1137 nahp, btop(seg1->s_size), btop(seg2->s_size), 1138 ANON_NOSLEEP)) { 1139 anon_release(nahp, btop(asize)); 1140 ANON_LOCK_EXIT(&2->a_rwlock); 1141 if (amp1 != NULL) { 1142 ANON_LOCK_EXIT(&1->a_rwlock); 1143 } 1144 if (nvpage != NULL) { 1145 kmem_free(nvpage, nvpsize); 1146 } 1147 return (-2); 1148 } 1149 } 1150 if (amp1 != NULL) { 1151 namp = amp1; 1152 anon_release(amp1->ahp, btop(amp1->size)); 1153 } 1154 if (amp2 != NULL) { 1155 if (namp == NULL) { 1156 ASSERT(amp1 == NULL); 1157 namp = amp2; 1158 anon_release(amp2->ahp, btop(amp2->size)); 1159 } else { 1160 amp2->refcnt--; 1161 ANON_LOCK_EXIT(&2->a_rwlock); 1162 anonmap_free(amp2); 1163 } 1164 svd2->amp = NULL; /* needed for seg_free */ 1165 } 1166 namp->ahp = nahp; 1167 namp->size = asize; 1168 svd1->amp = namp; 1169 svd1->anon_index = 0; 1170 ANON_LOCK_EXIT(&namp->a_rwlock); 1171 } 1172 /* 1173 * Now free the old vpage structures. 1174 */ 1175 if (nvpage != NULL) { 1176 if (vpage1 != NULL) { 1177 kmem_free(vpage1, vpgtob(npages1)); 1178 } 1179 if (vpage2 != NULL) { 1180 svd2->vpage = NULL; 1181 kmem_free(vpage2, vpgtob(npages2)); 1182 } 1183 if (svd2->pageprot) { 1184 svd1->pageprot = 1; 1185 } 1186 if (svd2->pageadvice) { 1187 svd1->pageadvice = 1; 1188 } 1189 if (svd2->pageswap) { 1190 svd1->pageswap = 1; 1191 } 1192 svd1->vpage = nvpage; 1193 } 1194 1195 /* all looks ok, merge segments */ 1196 svd1->swresv += svd2->swresv; 1197 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1198 size = seg2->s_size; 1199 seg_free(seg2); 1200 seg1->s_size += size; 1201 return (0); 1202 } 1203 1204 /* 1205 * Extend the previous segment (seg1) to include the 1206 * new segment (seg2 + a), if possible. 1207 * Return 0 on success. 1208 */ 1209 static int 1210 segvn_extend_prev(seg1, seg2, a, swresv) 1211 struct seg *seg1, *seg2; 1212 struct segvn_crargs *a; 1213 size_t swresv; 1214 { 1215 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1216 size_t size; 1217 struct anon_map *amp1; 1218 struct vpage *new_vpage; 1219 1220 /* 1221 * We don't need any segment level locks for "segvn" data 1222 * since the address space is "write" locked. 1223 */ 1224 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1225 1226 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1227 return (-1); 1228 } 1229 1230 /* second segment is new, try to extend first */ 1231 /* XXX - should also check cred */ 1232 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1233 (!svd1->pageprot && (svd1->prot != a->prot)) || 1234 svd1->type != a->type || svd1->flags != a->flags || 1235 seg1->s_szc != a->szc) 1236 return (-1); 1237 1238 /* vp == NULL implies zfod, offset doesn't matter */ 1239 if (svd1->vp != NULL && 1240 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1241 return (-1); 1242 1243 if (svd1->tr_state != SEGVN_TR_OFF) { 1244 return (-1); 1245 } 1246 1247 amp1 = svd1->amp; 1248 if (amp1) { 1249 pgcnt_t newpgs; 1250 1251 /* 1252 * Segment has private pages, can data structures 1253 * be expanded? 1254 * 1255 * Acquire the anon_map lock to prevent it from changing, 1256 * if it is shared. This ensures that the anon_map 1257 * will not change while a thread which has a read/write 1258 * lock on an address space references it. 1259 * XXX - Don't need the anon_map lock at all if "refcnt" 1260 * is 1. 1261 * 1262 * Can't grow a MAP_SHARED segment with an anonmap because 1263 * there may be existing anon slots where we want to extend 1264 * the segment and we wouldn't know what to do with them 1265 * (e.g., for tmpfs right thing is to just leave them there, 1266 * for /dev/zero they should be cleared out). 1267 */ 1268 if (svd1->type == MAP_SHARED) 1269 return (-1); 1270 1271 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1272 if (amp1->refcnt > 1) { 1273 ANON_LOCK_EXIT(&1->a_rwlock); 1274 return (-1); 1275 } 1276 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1277 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1278 1279 if (newpgs == 0) { 1280 ANON_LOCK_EXIT(&1->a_rwlock); 1281 return (-1); 1282 } 1283 amp1->size = ptob(newpgs); 1284 ANON_LOCK_EXIT(&1->a_rwlock); 1285 } 1286 if (svd1->vpage != NULL) { 1287 struct vpage *vp, *evp; 1288 new_vpage = 1289 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1290 KM_NOSLEEP); 1291 if (new_vpage == NULL) 1292 return (-1); 1293 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1294 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1295 svd1->vpage = new_vpage; 1296 1297 vp = new_vpage + seg_pages(seg1); 1298 evp = vp + seg_pages(seg2); 1299 for (; vp < evp; vp++) 1300 VPP_SETPROT(vp, a->prot); 1301 if (svd1->pageswap && swresv) { 1302 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1303 ASSERT(swresv == seg2->s_size); 1304 vp = new_vpage + seg_pages(seg1); 1305 for (; vp < evp; vp++) { 1306 VPP_SETSWAPRES(vp); 1307 } 1308 } 1309 } 1310 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0); 1311 size = seg2->s_size; 1312 seg_free(seg2); 1313 seg1->s_size += size; 1314 svd1->swresv += swresv; 1315 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1316 svd1->type == MAP_SHARED && svd1->vp != NULL && 1317 (svd1->vp->v_flag & VVMEXEC)) { 1318 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1319 segvn_inval_trcache(svd1->vp); 1320 } 1321 return (0); 1322 } 1323 1324 /* 1325 * Extend the next segment (seg2) to include the 1326 * new segment (seg1 + a), if possible. 1327 * Return 0 on success. 1328 */ 1329 static int 1330 segvn_extend_next( 1331 struct seg *seg1, 1332 struct seg *seg2, 1333 struct segvn_crargs *a, 1334 size_t swresv) 1335 { 1336 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1337 size_t size; 1338 struct anon_map *amp2; 1339 struct vpage *new_vpage; 1340 1341 /* 1342 * We don't need any segment level locks for "segvn" data 1343 * since the address space is "write" locked. 1344 */ 1345 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1346 1347 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1348 return (-1); 1349 } 1350 1351 /* first segment is new, try to extend second */ 1352 /* XXX - should also check cred */ 1353 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1354 (!svd2->pageprot && (svd2->prot != a->prot)) || 1355 svd2->type != a->type || svd2->flags != a->flags || 1356 seg2->s_szc != a->szc) 1357 return (-1); 1358 /* vp == NULL implies zfod, offset doesn't matter */ 1359 if (svd2->vp != NULL && 1360 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1361 return (-1); 1362 1363 if (svd2->tr_state != SEGVN_TR_OFF) { 1364 return (-1); 1365 } 1366 1367 amp2 = svd2->amp; 1368 if (amp2) { 1369 pgcnt_t newpgs; 1370 1371 /* 1372 * Segment has private pages, can data structures 1373 * be expanded? 1374 * 1375 * Acquire the anon_map lock to prevent it from changing, 1376 * if it is shared. This ensures that the anon_map 1377 * will not change while a thread which has a read/write 1378 * lock on an address space references it. 1379 * 1380 * XXX - Don't need the anon_map lock at all if "refcnt" 1381 * is 1. 1382 */ 1383 if (svd2->type == MAP_SHARED) 1384 return (-1); 1385 1386 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1387 if (amp2->refcnt > 1) { 1388 ANON_LOCK_EXIT(&2->a_rwlock); 1389 return (-1); 1390 } 1391 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1392 btop(seg2->s_size), btop(seg1->s_size), 1393 ANON_NOSLEEP | ANON_GROWDOWN); 1394 1395 if (newpgs == 0) { 1396 ANON_LOCK_EXIT(&2->a_rwlock); 1397 return (-1); 1398 } 1399 amp2->size = ptob(newpgs); 1400 ANON_LOCK_EXIT(&2->a_rwlock); 1401 } 1402 if (svd2->vpage != NULL) { 1403 struct vpage *vp, *evp; 1404 new_vpage = 1405 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1406 KM_NOSLEEP); 1407 if (new_vpage == NULL) { 1408 /* Not merging segments so adjust anon_index back */ 1409 if (amp2) 1410 svd2->anon_index += seg_pages(seg1); 1411 return (-1); 1412 } 1413 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1414 vpgtob(seg_pages(seg2))); 1415 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1416 svd2->vpage = new_vpage; 1417 1418 vp = new_vpage; 1419 evp = vp + seg_pages(seg1); 1420 for (; vp < evp; vp++) 1421 VPP_SETPROT(vp, a->prot); 1422 if (svd2->pageswap && swresv) { 1423 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1424 ASSERT(swresv == seg1->s_size); 1425 vp = new_vpage; 1426 for (; vp < evp; vp++) { 1427 VPP_SETSWAPRES(vp); 1428 } 1429 } 1430 } 1431 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0); 1432 size = seg1->s_size; 1433 seg_free(seg1); 1434 seg2->s_size += size; 1435 seg2->s_base -= size; 1436 svd2->offset -= size; 1437 svd2->swresv += swresv; 1438 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1439 svd2->type == MAP_SHARED && svd2->vp != NULL && 1440 (svd2->vp->v_flag & VVMEXEC)) { 1441 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1442 segvn_inval_trcache(svd2->vp); 1443 } 1444 return (0); 1445 } 1446 1447 static int 1448 segvn_dup(struct seg *seg, struct seg *newseg) 1449 { 1450 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1451 struct segvn_data *newsvd; 1452 pgcnt_t npages = seg_pages(seg); 1453 int error = 0; 1454 uint_t prot; 1455 size_t len; 1456 struct anon_map *amp; 1457 1458 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1459 1460 /* 1461 * If segment has anon reserved, reserve more for the new seg. 1462 * For a MAP_NORESERVE segment swresv will be a count of all the 1463 * allocated anon slots; thus we reserve for the child as many slots 1464 * as the parent has allocated. This semantic prevents the child or 1465 * parent from dieing during a copy-on-write fault caused by trying 1466 * to write a shared pre-existing anon page. 1467 */ 1468 if ((len = svd->swresv) != 0) { 1469 if (anon_resv(svd->swresv) == 0) 1470 return (ENOMEM); 1471 1472 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1473 seg, len, 0); 1474 } 1475 1476 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1477 1478 newseg->s_ops = &segvn_ops; 1479 newseg->s_data = (void *)newsvd; 1480 newseg->s_szc = seg->s_szc; 1481 1482 newsvd->seg = newseg; 1483 if ((newsvd->vp = svd->vp) != NULL) { 1484 VN_HOLD(svd->vp); 1485 if (svd->type == MAP_SHARED) 1486 lgrp_shm_policy_init(NULL, svd->vp); 1487 } 1488 newsvd->offset = svd->offset; 1489 newsvd->prot = svd->prot; 1490 newsvd->maxprot = svd->maxprot; 1491 newsvd->pageprot = svd->pageprot; 1492 newsvd->type = svd->type; 1493 newsvd->cred = svd->cred; 1494 crhold(newsvd->cred); 1495 newsvd->advice = svd->advice; 1496 newsvd->pageadvice = svd->pageadvice; 1497 newsvd->swresv = svd->swresv; 1498 newsvd->pageswap = svd->pageswap; 1499 newsvd->flags = svd->flags; 1500 newsvd->softlockcnt = 0; 1501 newsvd->policy_info = svd->policy_info; 1502 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1503 1504 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1505 /* 1506 * Not attaching to a shared anon object. 1507 */ 1508 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1509 svd->tr_state == SEGVN_TR_OFF); 1510 if (svd->tr_state == SEGVN_TR_ON) { 1511 ASSERT(newsvd->vp != NULL && amp != NULL); 1512 newsvd->tr_state = SEGVN_TR_INIT; 1513 } else { 1514 newsvd->tr_state = svd->tr_state; 1515 } 1516 newsvd->amp = NULL; 1517 newsvd->anon_index = 0; 1518 } else { 1519 /* regions for now are only used on pure vnode segments */ 1520 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1521 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1522 newsvd->tr_state = SEGVN_TR_OFF; 1523 if (svd->type == MAP_SHARED) { 1524 newsvd->amp = amp; 1525 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1526 amp->refcnt++; 1527 ANON_LOCK_EXIT(&->a_rwlock); 1528 newsvd->anon_index = svd->anon_index; 1529 } else { 1530 int reclaim = 1; 1531 1532 /* 1533 * Allocate and initialize new anon_map structure. 1534 */ 1535 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1536 ANON_SLEEP); 1537 newsvd->amp->a_szc = newseg->s_szc; 1538 newsvd->anon_index = 0; 1539 1540 /* 1541 * We don't have to acquire the anon_map lock 1542 * for the new segment (since it belongs to an 1543 * address space that is still not associated 1544 * with any process), or the segment in the old 1545 * address space (since all threads in it 1546 * are stopped while duplicating the address space). 1547 */ 1548 1549 /* 1550 * The goal of the following code is to make sure that 1551 * softlocked pages do not end up as copy on write 1552 * pages. This would cause problems where one 1553 * thread writes to a page that is COW and a different 1554 * thread in the same process has softlocked it. The 1555 * softlock lock would move away from this process 1556 * because the write would cause this process to get 1557 * a copy (without the softlock). 1558 * 1559 * The strategy here is to just break the 1560 * sharing on pages that could possibly be 1561 * softlocked. 1562 */ 1563 retry: 1564 if (svd->softlockcnt) { 1565 struct anon *ap, *newap; 1566 size_t i; 1567 uint_t vpprot; 1568 page_t *anon_pl[1+1], *pp; 1569 caddr_t addr; 1570 ulong_t old_idx = svd->anon_index; 1571 ulong_t new_idx = 0; 1572 1573 /* 1574 * The softlock count might be non zero 1575 * because some pages are still stuck in the 1576 * cache for lazy reclaim. Flush the cache 1577 * now. This should drop the count to zero. 1578 * [or there is really I/O going on to these 1579 * pages]. Note, we have the writers lock so 1580 * nothing gets inserted during the flush. 1581 */ 1582 if (reclaim == 1) { 1583 segvn_purge(seg); 1584 reclaim = 0; 1585 goto retry; 1586 } 1587 i = btopr(seg->s_size); 1588 addr = seg->s_base; 1589 /* 1590 * XXX break cow sharing using PAGESIZE 1591 * pages. They will be relocated into larger 1592 * pages at fault time. 1593 */ 1594 while (i-- > 0) { 1595 if (ap = anon_get_ptr(amp->ahp, 1596 old_idx)) { 1597 error = anon_getpage(&ap, 1598 &vpprot, anon_pl, PAGESIZE, 1599 seg, addr, S_READ, 1600 svd->cred); 1601 if (error) { 1602 newsvd->vpage = NULL; 1603 goto out; 1604 } 1605 /* 1606 * prot need not be computed 1607 * below 'cause anon_private is 1608 * going to ignore it anyway 1609 * as child doesn't inherit 1610 * pagelock from parent. 1611 */ 1612 prot = svd->pageprot ? 1613 VPP_PROT( 1614 &svd->vpage[ 1615 seg_page(seg, addr)]) 1616 : svd->prot; 1617 pp = anon_private(&newap, 1618 newseg, addr, prot, 1619 anon_pl[0], 0, 1620 newsvd->cred); 1621 if (pp == NULL) { 1622 /* no mem abort */ 1623 newsvd->vpage = NULL; 1624 error = ENOMEM; 1625 goto out; 1626 } 1627 (void) anon_set_ptr( 1628 newsvd->amp->ahp, new_idx, 1629 newap, ANON_SLEEP); 1630 page_unlock(pp); 1631 } 1632 addr += PAGESIZE; 1633 old_idx++; 1634 new_idx++; 1635 } 1636 } else { /* common case */ 1637 if (seg->s_szc != 0) { 1638 /* 1639 * If at least one of anon slots of a 1640 * large page exists then make sure 1641 * all anon slots of a large page 1642 * exist to avoid partial cow sharing 1643 * of a large page in the future. 1644 */ 1645 anon_dup_fill_holes(amp->ahp, 1646 svd->anon_index, newsvd->amp->ahp, 1647 0, seg->s_size, seg->s_szc, 1648 svd->vp != NULL); 1649 } else { 1650 anon_dup(amp->ahp, svd->anon_index, 1651 newsvd->amp->ahp, 0, seg->s_size); 1652 } 1653 1654 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1655 seg->s_size, PROT_WRITE); 1656 } 1657 } 1658 } 1659 /* 1660 * If necessary, create a vpage structure for the new segment. 1661 * Do not copy any page lock indications. 1662 */ 1663 if (svd->vpage != NULL) { 1664 uint_t i; 1665 struct vpage *ovp = svd->vpage; 1666 struct vpage *nvp; 1667 1668 nvp = newsvd->vpage = 1669 kmem_alloc(vpgtob(npages), KM_SLEEP); 1670 for (i = 0; i < npages; i++) { 1671 *nvp = *ovp++; 1672 VPP_CLRPPLOCK(nvp++); 1673 } 1674 } else 1675 newsvd->vpage = NULL; 1676 1677 /* Inform the vnode of the new mapping */ 1678 if (newsvd->vp != NULL) { 1679 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1680 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1681 newsvd->maxprot, newsvd->type, newsvd->cred, NULL); 1682 } 1683 out: 1684 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1685 ASSERT(newsvd->amp == NULL); 1686 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1687 newsvd->rcookie = svd->rcookie; 1688 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1689 } 1690 return (error); 1691 } 1692 1693 1694 /* 1695 * callback function to invoke free_vp_pages() for only those pages actually 1696 * processed by the HAT when a shared region is destroyed. 1697 */ 1698 extern int free_pages; 1699 1700 static void 1701 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1702 size_t r_size, void *r_obj, u_offset_t r_objoff) 1703 { 1704 u_offset_t off; 1705 size_t len; 1706 vnode_t *vp = (vnode_t *)r_obj; 1707 1708 ASSERT(eaddr > saddr); 1709 ASSERT(saddr >= r_saddr); 1710 ASSERT(saddr < r_saddr + r_size); 1711 ASSERT(eaddr > r_saddr); 1712 ASSERT(eaddr <= r_saddr + r_size); 1713 ASSERT(vp != NULL); 1714 1715 if (!free_pages) { 1716 return; 1717 } 1718 1719 len = eaddr - saddr; 1720 off = (saddr - r_saddr) + r_objoff; 1721 free_vp_pages(vp, off, len); 1722 } 1723 1724 /* 1725 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1726 * those pages actually processed by the HAT 1727 */ 1728 static void 1729 segvn_hat_unload_callback(hat_callback_t *cb) 1730 { 1731 struct seg *seg = cb->hcb_data; 1732 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1733 size_t len; 1734 u_offset_t off; 1735 1736 ASSERT(svd->vp != NULL); 1737 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1738 ASSERT(cb->hcb_start_addr >= seg->s_base); 1739 1740 len = cb->hcb_end_addr - cb->hcb_start_addr; 1741 off = cb->hcb_start_addr - seg->s_base; 1742 free_vp_pages(svd->vp, svd->offset + off, len); 1743 } 1744 1745 /* 1746 * This function determines the number of bytes of swap reserved by 1747 * a segment for which per-page accounting is present. It is used to 1748 * calculate the correct value of a segvn_data's swresv. 1749 */ 1750 static size_t 1751 segvn_count_swap_by_vpages(struct seg *seg) 1752 { 1753 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1754 struct vpage *vp, *evp; 1755 size_t nswappages = 0; 1756 1757 ASSERT(svd->pageswap); 1758 ASSERT(svd->vpage != NULL); 1759 1760 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 1761 1762 for (vp = svd->vpage; vp < evp; vp++) { 1763 if (VPP_ISSWAPRES(vp)) 1764 nswappages++; 1765 } 1766 1767 return (nswappages << PAGESHIFT); 1768 } 1769 1770 static int 1771 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1772 { 1773 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1774 struct segvn_data *nsvd; 1775 struct seg *nseg; 1776 struct anon_map *amp; 1777 pgcnt_t opages; /* old segment size in pages */ 1778 pgcnt_t npages; /* new segment size in pages */ 1779 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1780 hat_callback_t callback; /* used for free_vp_pages() */ 1781 hat_callback_t *cbp = NULL; 1782 caddr_t nbase; 1783 size_t nsize; 1784 size_t oswresv; 1785 int reclaim = 1; 1786 1787 /* 1788 * We don't need any segment level locks for "segvn" data 1789 * since the address space is "write" locked. 1790 */ 1791 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1792 1793 /* 1794 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1795 * softlockcnt is protected from change by the as write lock. 1796 */ 1797 retry: 1798 if (svd->softlockcnt > 0) { 1799 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1800 /* 1801 * since we do have the writers lock nobody can fill 1802 * the cache during the purge. The flush either succeeds 1803 * or we still have pending I/Os. 1804 */ 1805 if (reclaim == 1) { 1806 segvn_purge(seg); 1807 reclaim = 0; 1808 goto retry; 1809 } 1810 return (EAGAIN); 1811 } 1812 1813 /* 1814 * Check for bad sizes 1815 */ 1816 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1817 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1818 panic("segvn_unmap"); 1819 /*NOTREACHED*/ 1820 } 1821 1822 if (seg->s_szc != 0) { 1823 size_t pgsz = page_get_pagesize(seg->s_szc); 1824 int err; 1825 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1826 ASSERT(seg->s_base != addr || seg->s_size != len); 1827 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1828 ASSERT(svd->amp == NULL); 1829 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1830 hat_leave_region(seg->s_as->a_hat, 1831 svd->rcookie, HAT_REGION_TEXT); 1832 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1833 /* 1834 * could pass a flag to segvn_demote_range() 1835 * below to tell it not to do any unloads but 1836 * this case is rare enough to not bother for 1837 * now. 1838 */ 1839 } else if (svd->tr_state == SEGVN_TR_INIT) { 1840 svd->tr_state = SEGVN_TR_OFF; 1841 } else if (svd->tr_state == SEGVN_TR_ON) { 1842 ASSERT(svd->amp != NULL); 1843 segvn_textunrepl(seg, 1); 1844 ASSERT(svd->amp == NULL); 1845 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1846 } 1847 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1848 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1849 if (err == 0) { 1850 return (IE_RETRY); 1851 } 1852 return (err); 1853 } 1854 } 1855 1856 /* Inform the vnode of the unmapping. */ 1857 if (svd->vp) { 1858 int error; 1859 1860 error = VOP_DELMAP(svd->vp, 1861 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1862 seg->s_as, addr, len, svd->prot, svd->maxprot, 1863 svd->type, svd->cred, NULL); 1864 1865 if (error == EAGAIN) 1866 return (error); 1867 } 1868 1869 /* 1870 * Remove any page locks set through this mapping. 1871 * If text replication is not off no page locks could have been 1872 * established via this mapping. 1873 */ 1874 if (svd->tr_state == SEGVN_TR_OFF) { 1875 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1876 } 1877 1878 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1879 ASSERT(svd->amp == NULL); 1880 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1881 ASSERT(svd->type == MAP_PRIVATE); 1882 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1883 HAT_REGION_TEXT); 1884 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1885 } else if (svd->tr_state == SEGVN_TR_ON) { 1886 ASSERT(svd->amp != NULL); 1887 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1888 segvn_textunrepl(seg, 1); 1889 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1890 } else { 1891 if (svd->tr_state != SEGVN_TR_OFF) { 1892 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1893 svd->tr_state = SEGVN_TR_OFF; 1894 } 1895 /* 1896 * Unload any hardware translations in the range to be taken 1897 * out. Use a callback to invoke free_vp_pages() effectively. 1898 */ 1899 if (svd->vp != NULL && free_pages != 0) { 1900 callback.hcb_data = seg; 1901 callback.hcb_function = segvn_hat_unload_callback; 1902 cbp = &callback; 1903 } 1904 hat_unload_callback(seg->s_as->a_hat, addr, len, 1905 HAT_UNLOAD_UNMAP, cbp); 1906 1907 if (svd->type == MAP_SHARED && svd->vp != NULL && 1908 (svd->vp->v_flag & VVMEXEC) && 1909 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1910 segvn_inval_trcache(svd->vp); 1911 } 1912 } 1913 1914 /* 1915 * Check for entire segment 1916 */ 1917 if (addr == seg->s_base && len == seg->s_size) { 1918 seg_free(seg); 1919 return (0); 1920 } 1921 1922 opages = seg_pages(seg); 1923 dpages = btop(len); 1924 npages = opages - dpages; 1925 amp = svd->amp; 1926 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1927 1928 /* 1929 * Check for beginning of segment 1930 */ 1931 if (addr == seg->s_base) { 1932 if (svd->vpage != NULL) { 1933 size_t nbytes; 1934 struct vpage *ovpage; 1935 1936 ovpage = svd->vpage; /* keep pointer to vpage */ 1937 1938 nbytes = vpgtob(npages); 1939 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1940 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1941 1942 /* free up old vpage */ 1943 kmem_free(ovpage, vpgtob(opages)); 1944 } 1945 if (amp != NULL) { 1946 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1947 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1948 /* 1949 * Free up now unused parts of anon_map array. 1950 */ 1951 if (amp->a_szc == seg->s_szc) { 1952 if (seg->s_szc != 0) { 1953 anon_free_pages(amp->ahp, 1954 svd->anon_index, len, 1955 seg->s_szc); 1956 } else { 1957 anon_free(amp->ahp, 1958 svd->anon_index, 1959 len); 1960 } 1961 } else { 1962 ASSERT(svd->type == MAP_SHARED); 1963 ASSERT(amp->a_szc > seg->s_szc); 1964 anon_shmap_free_pages(amp, 1965 svd->anon_index, len); 1966 } 1967 1968 /* 1969 * Unreserve swap space for the 1970 * unmapped chunk of this segment in 1971 * case it's MAP_SHARED 1972 */ 1973 if (svd->type == MAP_SHARED) { 1974 anon_unresv(len); 1975 amp->swresv -= len; 1976 } 1977 } 1978 ANON_LOCK_EXIT(&->a_rwlock); 1979 svd->anon_index += dpages; 1980 } 1981 if (svd->vp != NULL) 1982 svd->offset += len; 1983 1984 seg->s_base += len; 1985 seg->s_size -= len; 1986 1987 if (svd->swresv) { 1988 if (svd->flags & MAP_NORESERVE) { 1989 ASSERT(amp); 1990 oswresv = svd->swresv; 1991 1992 svd->swresv = ptob(anon_pages(amp->ahp, 1993 svd->anon_index, npages)); 1994 anon_unresv(oswresv - svd->swresv); 1995 } else { 1996 size_t unlen; 1997 1998 if (svd->pageswap) { 1999 oswresv = svd->swresv; 2000 svd->swresv = 2001 segvn_count_swap_by_vpages(seg); 2002 ASSERT(oswresv >= svd->swresv); 2003 unlen = oswresv - svd->swresv; 2004 } else { 2005 svd->swresv -= len; 2006 ASSERT(svd->swresv == seg->s_size); 2007 unlen = len; 2008 } 2009 anon_unresv(unlen); 2010 } 2011 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2012 seg, len, 0); 2013 } 2014 2015 return (0); 2016 } 2017 2018 /* 2019 * Check for end of segment 2020 */ 2021 if (addr + len == seg->s_base + seg->s_size) { 2022 if (svd->vpage != NULL) { 2023 size_t nbytes; 2024 struct vpage *ovpage; 2025 2026 ovpage = svd->vpage; /* keep pointer to vpage */ 2027 2028 nbytes = vpgtob(npages); 2029 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2030 bcopy(ovpage, svd->vpage, nbytes); 2031 2032 /* free up old vpage */ 2033 kmem_free(ovpage, vpgtob(opages)); 2034 2035 } 2036 if (amp != NULL) { 2037 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2038 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2039 /* 2040 * Free up now unused parts of anon_map array. 2041 */ 2042 ulong_t an_idx = svd->anon_index + npages; 2043 if (amp->a_szc == seg->s_szc) { 2044 if (seg->s_szc != 0) { 2045 anon_free_pages(amp->ahp, 2046 an_idx, len, 2047 seg->s_szc); 2048 } else { 2049 anon_free(amp->ahp, an_idx, 2050 len); 2051 } 2052 } else { 2053 ASSERT(svd->type == MAP_SHARED); 2054 ASSERT(amp->a_szc > seg->s_szc); 2055 anon_shmap_free_pages(amp, 2056 an_idx, len); 2057 } 2058 2059 /* 2060 * Unreserve swap space for the 2061 * unmapped chunk of this segment in 2062 * case it's MAP_SHARED 2063 */ 2064 if (svd->type == MAP_SHARED) { 2065 anon_unresv(len); 2066 amp->swresv -= len; 2067 } 2068 } 2069 ANON_LOCK_EXIT(&->a_rwlock); 2070 } 2071 2072 seg->s_size -= len; 2073 2074 if (svd->swresv) { 2075 if (svd->flags & MAP_NORESERVE) { 2076 ASSERT(amp); 2077 oswresv = svd->swresv; 2078 svd->swresv = ptob(anon_pages(amp->ahp, 2079 svd->anon_index, npages)); 2080 anon_unresv(oswresv - svd->swresv); 2081 } else { 2082 size_t unlen; 2083 2084 if (svd->pageswap) { 2085 oswresv = svd->swresv; 2086 svd->swresv = 2087 segvn_count_swap_by_vpages(seg); 2088 ASSERT(oswresv >= svd->swresv); 2089 unlen = oswresv - svd->swresv; 2090 } else { 2091 svd->swresv -= len; 2092 ASSERT(svd->swresv == seg->s_size); 2093 unlen = len; 2094 } 2095 anon_unresv(unlen); 2096 } 2097 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2098 "anon proc:%p %lu %u", seg, len, 0); 2099 } 2100 2101 return (0); 2102 } 2103 2104 /* 2105 * The section to go is in the middle of the segment, 2106 * have to make it into two segments. nseg is made for 2107 * the high end while seg is cut down at the low end. 2108 */ 2109 nbase = addr + len; /* new seg base */ 2110 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2111 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2112 nseg = seg_alloc(seg->s_as, nbase, nsize); 2113 if (nseg == NULL) { 2114 panic("segvn_unmap seg_alloc"); 2115 /*NOTREACHED*/ 2116 } 2117 nseg->s_ops = seg->s_ops; 2118 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2119 nseg->s_data = (void *)nsvd; 2120 nseg->s_szc = seg->s_szc; 2121 *nsvd = *svd; 2122 nsvd->seg = nseg; 2123 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2124 nsvd->swresv = 0; 2125 nsvd->softlockcnt = 0; 2126 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2127 2128 if (svd->vp != NULL) { 2129 VN_HOLD(nsvd->vp); 2130 if (nsvd->type == MAP_SHARED) 2131 lgrp_shm_policy_init(NULL, nsvd->vp); 2132 } 2133 crhold(svd->cred); 2134 2135 if (svd->vpage == NULL) { 2136 nsvd->vpage = NULL; 2137 } else { 2138 /* need to split vpage into two arrays */ 2139 size_t nbytes; 2140 struct vpage *ovpage; 2141 2142 ovpage = svd->vpage; /* keep pointer to vpage */ 2143 2144 npages = seg_pages(seg); /* seg has shrunk */ 2145 nbytes = vpgtob(npages); 2146 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2147 2148 bcopy(ovpage, svd->vpage, nbytes); 2149 2150 npages = seg_pages(nseg); 2151 nbytes = vpgtob(npages); 2152 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2153 2154 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2155 2156 /* free up old vpage */ 2157 kmem_free(ovpage, vpgtob(opages)); 2158 } 2159 2160 if (amp == NULL) { 2161 nsvd->amp = NULL; 2162 nsvd->anon_index = 0; 2163 } else { 2164 /* 2165 * Need to create a new anon map for the new segment. 2166 * We'll also allocate a new smaller array for the old 2167 * smaller segment to save space. 2168 */ 2169 opages = btop((uintptr_t)(addr - seg->s_base)); 2170 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2171 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2172 /* 2173 * Free up now unused parts of anon_map array. 2174 */ 2175 ulong_t an_idx = svd->anon_index + opages; 2176 if (amp->a_szc == seg->s_szc) { 2177 if (seg->s_szc != 0) { 2178 anon_free_pages(amp->ahp, an_idx, len, 2179 seg->s_szc); 2180 } else { 2181 anon_free(amp->ahp, an_idx, 2182 len); 2183 } 2184 } else { 2185 ASSERT(svd->type == MAP_SHARED); 2186 ASSERT(amp->a_szc > seg->s_szc); 2187 anon_shmap_free_pages(amp, an_idx, len); 2188 } 2189 2190 /* 2191 * Unreserve swap space for the 2192 * unmapped chunk of this segment in 2193 * case it's MAP_SHARED 2194 */ 2195 if (svd->type == MAP_SHARED) { 2196 anon_unresv(len); 2197 amp->swresv -= len; 2198 } 2199 } 2200 nsvd->anon_index = svd->anon_index + 2201 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2202 if (svd->type == MAP_SHARED) { 2203 amp->refcnt++; 2204 nsvd->amp = amp; 2205 } else { 2206 struct anon_map *namp; 2207 struct anon_hdr *nahp; 2208 2209 ASSERT(svd->type == MAP_PRIVATE); 2210 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2211 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2212 namp->a_szc = seg->s_szc; 2213 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2214 0, btop(seg->s_size), ANON_SLEEP); 2215 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2216 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2217 anon_release(amp->ahp, btop(amp->size)); 2218 svd->anon_index = 0; 2219 nsvd->anon_index = 0; 2220 amp->ahp = nahp; 2221 amp->size = seg->s_size; 2222 nsvd->amp = namp; 2223 } 2224 ANON_LOCK_EXIT(&->a_rwlock); 2225 } 2226 if (svd->swresv) { 2227 if (svd->flags & MAP_NORESERVE) { 2228 ASSERT(amp); 2229 oswresv = svd->swresv; 2230 svd->swresv = ptob(anon_pages(amp->ahp, 2231 svd->anon_index, btop(seg->s_size))); 2232 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2233 nsvd->anon_index, btop(nseg->s_size))); 2234 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2235 anon_unresv(oswresv - (svd->swresv + nsvd->swresv)); 2236 } else { 2237 size_t unlen; 2238 2239 if (svd->pageswap) { 2240 oswresv = svd->swresv; 2241 svd->swresv = segvn_count_swap_by_vpages(seg); 2242 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2243 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2244 unlen = oswresv - (svd->swresv + nsvd->swresv); 2245 } else { 2246 if (seg->s_size + nseg->s_size + len != 2247 svd->swresv) { 2248 panic("segvn_unmap: cannot split " 2249 "swap reservation"); 2250 /*NOTREACHED*/ 2251 } 2252 svd->swresv = seg->s_size; 2253 nsvd->swresv = nseg->s_size; 2254 unlen = len; 2255 } 2256 anon_unresv(unlen); 2257 } 2258 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2259 seg, len, 0); 2260 } 2261 2262 return (0); /* I'm glad that's all over with! */ 2263 } 2264 2265 static void 2266 segvn_free(struct seg *seg) 2267 { 2268 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2269 pgcnt_t npages = seg_pages(seg); 2270 struct anon_map *amp; 2271 size_t len; 2272 2273 /* 2274 * We don't need any segment level locks for "segvn" data 2275 * since the address space is "write" locked. 2276 */ 2277 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2278 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2279 2280 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2281 2282 /* 2283 * Be sure to unlock pages. XXX Why do things get free'ed instead 2284 * of unmapped? XXX 2285 */ 2286 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2287 0, MC_UNLOCK, NULL, 0); 2288 2289 /* 2290 * Deallocate the vpage and anon pointers if necessary and possible. 2291 */ 2292 if (svd->vpage != NULL) { 2293 kmem_free(svd->vpage, vpgtob(npages)); 2294 svd->vpage = NULL; 2295 } 2296 if ((amp = svd->amp) != NULL) { 2297 /* 2298 * If there are no more references to this anon_map 2299 * structure, then deallocate the structure after freeing 2300 * up all the anon slot pointers that we can. 2301 */ 2302 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2303 ASSERT(amp->a_szc >= seg->s_szc); 2304 if (--amp->refcnt == 0) { 2305 if (svd->type == MAP_PRIVATE) { 2306 /* 2307 * Private - we only need to anon_free 2308 * the part that this segment refers to. 2309 */ 2310 if (seg->s_szc != 0) { 2311 anon_free_pages(amp->ahp, 2312 svd->anon_index, seg->s_size, 2313 seg->s_szc); 2314 } else { 2315 anon_free(amp->ahp, svd->anon_index, 2316 seg->s_size); 2317 } 2318 } else { 2319 /* 2320 * Shared - anon_free the entire 2321 * anon_map's worth of stuff and 2322 * release any swap reservation. 2323 */ 2324 if (amp->a_szc != 0) { 2325 anon_shmap_free_pages(amp, 0, 2326 amp->size); 2327 } else { 2328 anon_free(amp->ahp, 0, amp->size); 2329 } 2330 if ((len = amp->swresv) != 0) { 2331 anon_unresv(len); 2332 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2333 "anon proc:%p %lu %u", seg, len, 0); 2334 } 2335 } 2336 svd->amp = NULL; 2337 ANON_LOCK_EXIT(&->a_rwlock); 2338 anonmap_free(amp); 2339 } else if (svd->type == MAP_PRIVATE) { 2340 /* 2341 * We had a private mapping which still has 2342 * a held anon_map so just free up all the 2343 * anon slot pointers that we were using. 2344 */ 2345 if (seg->s_szc != 0) { 2346 anon_free_pages(amp->ahp, svd->anon_index, 2347 seg->s_size, seg->s_szc); 2348 } else { 2349 anon_free(amp->ahp, svd->anon_index, 2350 seg->s_size); 2351 } 2352 ANON_LOCK_EXIT(&->a_rwlock); 2353 } else { 2354 ANON_LOCK_EXIT(&->a_rwlock); 2355 } 2356 } 2357 2358 /* 2359 * Release swap reservation. 2360 */ 2361 if ((len = svd->swresv) != 0) { 2362 anon_unresv(svd->swresv); 2363 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2364 seg, len, 0); 2365 svd->swresv = 0; 2366 } 2367 /* 2368 * Release claim on vnode, credentials, and finally free the 2369 * private data. 2370 */ 2371 if (svd->vp != NULL) { 2372 if (svd->type == MAP_SHARED) 2373 lgrp_shm_policy_fini(NULL, svd->vp); 2374 VN_RELE(svd->vp); 2375 svd->vp = NULL; 2376 } 2377 crfree(svd->cred); 2378 svd->pageprot = 0; 2379 svd->pageadvice = 0; 2380 svd->pageswap = 0; 2381 svd->cred = NULL; 2382 2383 seg->s_data = NULL; 2384 kmem_cache_free(segvn_cache, svd); 2385 } 2386 2387 #ifdef DEBUG 2388 uint32_t segvn_slock_mtbf = 0; 2389 #endif 2390 2391 ulong_t segvn_lpglck_limit = 0; 2392 2393 /* 2394 * Support routines used by segvn_pagelock() and softlock faults for anonymous 2395 * pages to implement availrmem accounting in a way that makes sure the 2396 * same memory is accounted just once for all softlock/pagelock purposes. 2397 * This prevents a bug when availrmem is quickly incorrectly exhausted from 2398 * several pagelocks to different parts of the same large page since each 2399 * pagelock has to decrement availrmem by the size of the entire large 2400 * page. Note those pages are not COW shared until softunlock/pageunlock so 2401 * we don't need to use cow style accounting here. We also need to make sure 2402 * the entire large page is accounted even if softlock range is less than the 2403 * entire large page because large anon pages can't be demoted when any of 2404 * constituent pages is locked. The caller calls this routine for every page_t 2405 * it locks. The very first page in the range may not be the root page of a 2406 * large page. For all other pages it's guaranteed we are going to visit the 2407 * root of a particular large page before any other constituent page as we are 2408 * locking sequential pages belonging to the same anon map. So we do all the 2409 * locking when the root is encountered except for the very first page. Since 2410 * softlocking is not supported (except S_READ_NOCOW special case) for vmpss 2411 * segments and since vnode pages can be demoted without locking all 2412 * constituent pages vnode pages don't come here. Unlocking relies on the 2413 * fact that pagesize can't change whenever any of constituent large pages is 2414 * locked at least SE_SHARED. This allows unlocking code to find the right 2415 * root and decrement availrmem by the same amount it was incremented when the 2416 * page was locked. 2417 */ 2418 static int 2419 segvn_slock_anonpages(page_t *pp, int first) 2420 { 2421 pgcnt_t pages; 2422 pfn_t pfn; 2423 uchar_t szc = pp->p_szc; 2424 2425 ASSERT(PAGE_LOCKED(pp)); 2426 ASSERT(pp->p_vnode != NULL); 2427 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2428 2429 /* 2430 * pagesize won't change as long as any constituent page is locked. 2431 */ 2432 pages = page_get_pagecnt(pp->p_szc); 2433 pfn = page_pptonum(pp); 2434 2435 if (!first) { 2436 if (!IS_P2ALIGNED(pfn, pages)) { 2437 #ifdef DEBUG 2438 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2439 pfn = page_pptonum(pp); 2440 ASSERT(IS_P2ALIGNED(pfn, pages)); 2441 ASSERT(pp->p_szc == szc); 2442 ASSERT(pp->p_vnode != NULL); 2443 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2444 ASSERT(pp->p_slckcnt != 0); 2445 #endif /* DEBUG */ 2446 return (1); 2447 } 2448 } else if (!IS_P2ALIGNED(pfn, pages)) { 2449 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2450 #ifdef DEBUG 2451 pfn = page_pptonum(pp); 2452 ASSERT(IS_P2ALIGNED(pfn, pages)); 2453 ASSERT(pp->p_szc == szc); 2454 ASSERT(pp->p_vnode != NULL); 2455 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2456 #endif /* DEBUG */ 2457 } 2458 2459 #ifdef DEBUG 2460 if (segvn_slock_mtbf && !(gethrtime() % segvn_slock_mtbf)) { 2461 return (0); 2462 } 2463 #endif /* DEBUG */ 2464 2465 /* 2466 * pp is a root page. 2467 * We haven't locked this large page yet. 2468 */ 2469 page_struct_lock(pp); 2470 if (pp->p_slckcnt != 0) { 2471 if (pp->p_slckcnt < PAGE_SLOCK_MAXIMUM) { 2472 pp->p_slckcnt++; 2473 page_struct_unlock(pp); 2474 return (1); 2475 } 2476 page_struct_unlock(pp); 2477 segvn_lpglck_limit++; 2478 return (0); 2479 } 2480 mutex_enter(&freemem_lock); 2481 if (availrmem < tune.t_minarmem + pages) { 2482 mutex_exit(&freemem_lock); 2483 page_struct_unlock(pp); 2484 return (0); 2485 } 2486 pp->p_slckcnt++; 2487 availrmem -= pages; 2488 mutex_exit(&freemem_lock); 2489 page_struct_unlock(pp); 2490 return (1); 2491 } 2492 2493 static void 2494 segvn_sunlock_anonpages(page_t *pp, int first) 2495 { 2496 pgcnt_t pages; 2497 pfn_t pfn; 2498 2499 ASSERT(PAGE_LOCKED(pp)); 2500 ASSERT(pp->p_vnode != NULL); 2501 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2502 2503 /* 2504 * pagesize won't change as long as any constituent page is locked. 2505 */ 2506 pages = page_get_pagecnt(pp->p_szc); 2507 pfn = page_pptonum(pp); 2508 2509 if (!first) { 2510 if (!IS_P2ALIGNED(pfn, pages)) { 2511 return; 2512 } 2513 } else if (!IS_P2ALIGNED(pfn, pages)) { 2514 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2515 #ifdef DEBUG 2516 pfn = page_pptonum(pp); 2517 ASSERT(IS_P2ALIGNED(pfn, pages)); 2518 #endif /* DEBUG */ 2519 } 2520 ASSERT(pp->p_vnode != NULL); 2521 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2522 ASSERT(pp->p_slckcnt != 0); 2523 page_struct_lock(pp); 2524 if (--pp->p_slckcnt == 0) { 2525 mutex_enter(&freemem_lock); 2526 availrmem += pages; 2527 mutex_exit(&freemem_lock); 2528 } 2529 page_struct_unlock(pp); 2530 } 2531 2532 /* 2533 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2534 * already been F_SOFTLOCK'ed. 2535 * Caller must always match addr and len of a softunlock with a previous 2536 * softlock with exactly the same addr and len. 2537 */ 2538 static void 2539 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2540 { 2541 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2542 page_t *pp; 2543 caddr_t adr; 2544 struct vnode *vp; 2545 u_offset_t offset; 2546 ulong_t anon_index; 2547 struct anon_map *amp; 2548 struct anon *ap = NULL; 2549 2550 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2551 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2552 2553 if ((amp = svd->amp) != NULL) 2554 anon_index = svd->anon_index + seg_page(seg, addr); 2555 2556 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2557 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2558 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2559 } else { 2560 hat_unlock(seg->s_as->a_hat, addr, len); 2561 } 2562 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2563 if (amp != NULL) { 2564 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2565 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2566 != NULL) { 2567 swap_xlate(ap, &vp, &offset); 2568 } else { 2569 vp = svd->vp; 2570 offset = svd->offset + 2571 (uintptr_t)(adr - seg->s_base); 2572 } 2573 ANON_LOCK_EXIT(&->a_rwlock); 2574 } else { 2575 vp = svd->vp; 2576 offset = svd->offset + 2577 (uintptr_t)(adr - seg->s_base); 2578 } 2579 2580 /* 2581 * Use page_find() instead of page_lookup() to 2582 * find the page since we know that it is locked. 2583 */ 2584 pp = page_find(vp, offset); 2585 if (pp == NULL) { 2586 panic( 2587 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2588 (void *)adr, (void *)ap, (void *)vp, offset); 2589 /*NOTREACHED*/ 2590 } 2591 2592 if (rw == S_WRITE) { 2593 hat_setrefmod(pp); 2594 if (seg->s_as->a_vbits) 2595 hat_setstat(seg->s_as, adr, PAGESIZE, 2596 P_REF | P_MOD); 2597 } else if (rw != S_OTHER) { 2598 hat_setref(pp); 2599 if (seg->s_as->a_vbits) 2600 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2601 } 2602 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2603 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2604 if (svd->vp == NULL) { 2605 segvn_sunlock_anonpages(pp, adr == addr); 2606 } 2607 page_unlock(pp); 2608 } 2609 mutex_enter(&freemem_lock); /* for availrmem */ 2610 if (svd->vp != NULL) { 2611 availrmem += btop(len); 2612 } 2613 segvn_pages_locked -= btop(len); 2614 svd->softlockcnt -= btop(len); 2615 mutex_exit(&freemem_lock); 2616 if (svd->softlockcnt == 0) { 2617 /* 2618 * All SOFTLOCKS are gone. Wakeup any waiting 2619 * unmappers so they can try again to unmap. 2620 * Check for waiters first without the mutex 2621 * held so we don't always grab the mutex on 2622 * softunlocks. 2623 */ 2624 if (AS_ISUNMAPWAIT(seg->s_as)) { 2625 mutex_enter(&seg->s_as->a_contents); 2626 if (AS_ISUNMAPWAIT(seg->s_as)) { 2627 AS_CLRUNMAPWAIT(seg->s_as); 2628 cv_broadcast(&seg->s_as->a_cv); 2629 } 2630 mutex_exit(&seg->s_as->a_contents); 2631 } 2632 } 2633 } 2634 2635 #define PAGE_HANDLED ((page_t *)-1) 2636 2637 /* 2638 * Release all the pages in the NULL terminated ppp list 2639 * which haven't already been converted to PAGE_HANDLED. 2640 */ 2641 static void 2642 segvn_pagelist_rele(page_t **ppp) 2643 { 2644 for (; *ppp != NULL; ppp++) { 2645 if (*ppp != PAGE_HANDLED) 2646 page_unlock(*ppp); 2647 } 2648 } 2649 2650 static int stealcow = 1; 2651 2652 /* 2653 * Workaround for viking chip bug. See bug id 1220902. 2654 * To fix this down in pagefault() would require importing so 2655 * much as and segvn code as to be unmaintainable. 2656 */ 2657 int enable_mbit_wa = 0; 2658 2659 /* 2660 * Handles all the dirty work of getting the right 2661 * anonymous pages and loading up the translations. 2662 * This routine is called only from segvn_fault() 2663 * when looping over the range of addresses requested. 2664 * 2665 * The basic algorithm here is: 2666 * If this is an anon_zero case 2667 * Call anon_zero to allocate page 2668 * Load up translation 2669 * Return 2670 * endif 2671 * If this is an anon page 2672 * Use anon_getpage to get the page 2673 * else 2674 * Find page in pl[] list passed in 2675 * endif 2676 * If not a cow 2677 * Load up the translation to the page 2678 * return 2679 * endif 2680 * Call anon_private to handle cow 2681 * Load up (writable) translation to new page 2682 */ 2683 static faultcode_t 2684 segvn_faultpage( 2685 struct hat *hat, /* the hat to use for mapping */ 2686 struct seg *seg, /* seg_vn of interest */ 2687 caddr_t addr, /* address in as */ 2688 u_offset_t off, /* offset in vp */ 2689 struct vpage *vpage, /* pointer to vpage for vp, off */ 2690 page_t *pl[], /* object source page pointer */ 2691 uint_t vpprot, /* access allowed to object pages */ 2692 enum fault_type type, /* type of fault */ 2693 enum seg_rw rw, /* type of access at fault */ 2694 int brkcow, /* we may need to break cow */ 2695 int first) /* first page for this fault if 1 */ 2696 { 2697 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2698 page_t *pp, **ppp; 2699 uint_t pageflags = 0; 2700 page_t *anon_pl[1 + 1]; 2701 page_t *opp = NULL; /* original page */ 2702 uint_t prot; 2703 int err; 2704 int cow; 2705 int claim; 2706 int steal = 0; 2707 ulong_t anon_index; 2708 struct anon *ap, *oldap; 2709 struct anon_map *amp; 2710 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2711 int anon_lock = 0; 2712 anon_sync_obj_t cookie; 2713 2714 if (svd->flags & MAP_TEXT) { 2715 hat_flag |= HAT_LOAD_TEXT; 2716 } 2717 2718 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2719 ASSERT(seg->s_szc == 0); 2720 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2721 2722 /* 2723 * Initialize protection value for this page. 2724 * If we have per page protection values check it now. 2725 */ 2726 if (svd->pageprot) { 2727 uint_t protchk; 2728 2729 switch (rw) { 2730 case S_READ: 2731 protchk = PROT_READ; 2732 break; 2733 case S_WRITE: 2734 protchk = PROT_WRITE; 2735 break; 2736 case S_EXEC: 2737 protchk = PROT_EXEC; 2738 break; 2739 case S_OTHER: 2740 default: 2741 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2742 break; 2743 } 2744 2745 prot = VPP_PROT(vpage); 2746 if ((prot & protchk) == 0) 2747 return (FC_PROT); /* illegal access type */ 2748 } else { 2749 prot = svd->prot; 2750 } 2751 2752 if (type == F_SOFTLOCK && svd->vp != NULL) { 2753 mutex_enter(&freemem_lock); 2754 if (availrmem <= tune.t_minarmem) { 2755 mutex_exit(&freemem_lock); 2756 return (FC_MAKE_ERR(ENOMEM)); /* out of real memory */ 2757 } else { 2758 availrmem--; 2759 svd->softlockcnt++; 2760 segvn_pages_locked++; 2761 } 2762 mutex_exit(&freemem_lock); 2763 } 2764 2765 /* 2766 * Always acquire the anon array lock to prevent 2 threads from 2767 * allocating separate anon slots for the same "addr". 2768 */ 2769 2770 if ((amp = svd->amp) != NULL) { 2771 ASSERT(RW_READ_HELD(&->a_rwlock)); 2772 anon_index = svd->anon_index + seg_page(seg, addr); 2773 anon_array_enter(amp, anon_index, &cookie); 2774 anon_lock = 1; 2775 } 2776 2777 if (svd->vp == NULL && amp != NULL) { 2778 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2779 /* 2780 * Allocate a (normally) writable anonymous page of 2781 * zeroes. If no advance reservations, reserve now. 2782 */ 2783 if (svd->flags & MAP_NORESERVE) { 2784 if (anon_resv_zone(ptob(1), 2785 seg->s_as->a_proc->p_zone)) { 2786 atomic_add_long(&svd->swresv, ptob(1)); 2787 } else { 2788 err = ENOMEM; 2789 goto out; 2790 } 2791 } 2792 if ((pp = anon_zero(seg, addr, &ap, 2793 svd->cred)) == NULL) { 2794 err = ENOMEM; 2795 goto out; /* out of swap space */ 2796 } 2797 /* 2798 * Re-acquire the anon_map lock and 2799 * initialize the anon array entry. 2800 */ 2801 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2802 ANON_SLEEP); 2803 2804 ASSERT(pp->p_szc == 0); 2805 2806 /* 2807 * Handle pages that have been marked for migration 2808 */ 2809 if (lgrp_optimizations()) 2810 page_migrate(seg, addr, &pp, 1); 2811 2812 if (type == F_SOFTLOCK) { 2813 if (!segvn_slock_anonpages(pp, first)) { 2814 page_unlock(pp); 2815 err = ENOMEM; 2816 goto out; 2817 } else { 2818 mutex_enter(&freemem_lock); 2819 svd->softlockcnt++; 2820 segvn_pages_locked++; 2821 mutex_exit(&freemem_lock); 2822 } 2823 } 2824 2825 if (enable_mbit_wa) { 2826 if (rw == S_WRITE) 2827 hat_setmod(pp); 2828 else if (!hat_ismod(pp)) 2829 prot &= ~PROT_WRITE; 2830 } 2831 /* 2832 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2833 * with MC_LOCKAS, MCL_FUTURE) and this is a 2834 * MAP_NORESERVE segment, we may need to 2835 * permanently lock the page as it is being faulted 2836 * for the first time. The following text applies 2837 * only to MAP_NORESERVE segments: 2838 * 2839 * As per memcntl(2), if this segment was created 2840 * after MCL_FUTURE was applied (a "future" 2841 * segment), its pages must be locked. If this 2842 * segment existed at MCL_FUTURE application (a 2843 * "past" segment), the interface is unclear. 2844 * 2845 * We decide to lock only if vpage is present: 2846 * 2847 * - "future" segments will have a vpage array (see 2848 * as_map), and so will be locked as required 2849 * 2850 * - "past" segments may not have a vpage array, 2851 * depending on whether events (such as 2852 * mprotect) have occurred. Locking if vpage 2853 * exists will preserve legacy behavior. Not 2854 * locking if vpage is absent, will not break 2855 * the interface or legacy behavior. Note that 2856 * allocating vpage here if it's absent requires 2857 * upgrading the segvn reader lock, the cost of 2858 * which does not seem worthwhile. 2859 * 2860 * Usually testing and setting VPP_ISPPLOCK and 2861 * VPP_SETPPLOCK requires holding the segvn lock as 2862 * writer, but in this case all readers are 2863 * serializing on the anon array lock. 2864 */ 2865 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2866 (svd->flags & MAP_NORESERVE) && 2867 !VPP_ISPPLOCK(vpage)) { 2868 proc_t *p = seg->s_as->a_proc; 2869 ASSERT(svd->type == MAP_PRIVATE); 2870 mutex_enter(&p->p_lock); 2871 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2872 1) == 0) { 2873 claim = VPP_PROT(vpage) & PROT_WRITE; 2874 if (page_pp_lock(pp, claim, 0)) { 2875 VPP_SETPPLOCK(vpage); 2876 } else { 2877 rctl_decr_locked_mem(p, NULL, 2878 PAGESIZE, 1); 2879 } 2880 } 2881 mutex_exit(&p->p_lock); 2882 } 2883 2884 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2885 hat_memload(hat, addr, pp, prot, hat_flag); 2886 2887 if (!(hat_flag & HAT_LOAD_LOCK)) 2888 page_unlock(pp); 2889 2890 anon_array_exit(&cookie); 2891 return (0); 2892 } 2893 } 2894 2895 /* 2896 * Obtain the page structure via anon_getpage() if it is 2897 * a private copy of an object (the result of a previous 2898 * copy-on-write). 2899 */ 2900 if (amp != NULL) { 2901 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2902 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2903 seg, addr, rw, svd->cred); 2904 if (err) 2905 goto out; 2906 2907 if (svd->type == MAP_SHARED) { 2908 /* 2909 * If this is a shared mapping to an 2910 * anon_map, then ignore the write 2911 * permissions returned by anon_getpage(). 2912 * They apply to the private mappings 2913 * of this anon_map. 2914 */ 2915 vpprot |= PROT_WRITE; 2916 } 2917 opp = anon_pl[0]; 2918 } 2919 } 2920 2921 /* 2922 * Search the pl[] list passed in if it is from the 2923 * original object (i.e., not a private copy). 2924 */ 2925 if (opp == NULL) { 2926 /* 2927 * Find original page. We must be bringing it in 2928 * from the list in pl[]. 2929 */ 2930 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2931 if (opp == PAGE_HANDLED) 2932 continue; 2933 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2934 if (opp->p_offset == off) 2935 break; 2936 } 2937 if (opp == NULL) { 2938 panic("segvn_faultpage not found"); 2939 /*NOTREACHED*/ 2940 } 2941 *ppp = PAGE_HANDLED; 2942 2943 } 2944 2945 ASSERT(PAGE_LOCKED(opp)); 2946 2947 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2948 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2949 2950 /* 2951 * The fault is treated as a copy-on-write fault if a 2952 * write occurs on a private segment and the object 2953 * page (i.e., mapping) is write protected. We assume 2954 * that fatal protection checks have already been made. 2955 */ 2956 2957 if (brkcow) { 2958 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2959 cow = !(vpprot & PROT_WRITE); 2960 } else if (svd->tr_state == SEGVN_TR_ON) { 2961 /* 2962 * If we are doing text replication COW on first touch. 2963 */ 2964 ASSERT(amp != NULL); 2965 ASSERT(svd->vp != NULL); 2966 ASSERT(rw != S_WRITE); 2967 cow = (ap == NULL); 2968 } else { 2969 cow = 0; 2970 } 2971 2972 /* 2973 * If not a copy-on-write case load the translation 2974 * and return. 2975 */ 2976 if (cow == 0) { 2977 2978 /* 2979 * Handle pages that have been marked for migration 2980 */ 2981 if (lgrp_optimizations()) 2982 page_migrate(seg, addr, &opp, 1); 2983 2984 if (type == F_SOFTLOCK && svd->vp == NULL) { 2985 2986 ASSERT(opp->p_szc == 0 || 2987 (svd->type == MAP_SHARED && 2988 amp != NULL && amp->a_szc != 0)); 2989 2990 if (!segvn_slock_anonpages(opp, first)) { 2991 page_unlock(opp); 2992 err = ENOMEM; 2993 goto out; 2994 } else { 2995 mutex_enter(&freemem_lock); 2996 svd->softlockcnt++; 2997 segvn_pages_locked++; 2998 mutex_exit(&freemem_lock); 2999 } 3000 } 3001 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 3002 if (rw == S_WRITE) 3003 hat_setmod(opp); 3004 else if (rw != S_OTHER && !hat_ismod(opp)) 3005 prot &= ~PROT_WRITE; 3006 } 3007 3008 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 3009 (!svd->pageprot && svd->prot == (prot & vpprot))); 3010 ASSERT(amp == NULL || 3011 svd->rcookie == HAT_INVALID_REGION_COOKIE); 3012 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 3013 svd->rcookie); 3014 3015 if (!(hat_flag & HAT_LOAD_LOCK)) 3016 page_unlock(opp); 3017 3018 if (anon_lock) { 3019 anon_array_exit(&cookie); 3020 } 3021 return (0); 3022 } 3023 3024 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3025 3026 hat_setref(opp); 3027 3028 ASSERT(amp != NULL && anon_lock); 3029 3030 /* 3031 * Steal the page only if it isn't a private page 3032 * since stealing a private page is not worth the effort. 3033 */ 3034 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 3035 steal = 1; 3036 3037 /* 3038 * Steal the original page if the following conditions are true: 3039 * 3040 * We are low on memory, the page is not private, page is not large, 3041 * not shared, not modified, not `locked' or if we have it `locked' 3042 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 3043 * that the page is not shared) and if it doesn't have any 3044 * translations. page_struct_lock isn't needed to look at p_cowcnt 3045 * and p_lckcnt because we first get exclusive lock on page. 3046 */ 3047 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 3048 3049 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 3050 page_tryupgrade(opp) && !hat_ismod(opp) && 3051 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 3052 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 3053 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 3054 /* 3055 * Check if this page has other translations 3056 * after unloading our translation. 3057 */ 3058 if (hat_page_is_mapped(opp)) { 3059 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3060 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 3061 HAT_UNLOAD); 3062 } 3063 3064 /* 3065 * hat_unload() might sync back someone else's recent 3066 * modification, so check again. 3067 */ 3068 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 3069 pageflags |= STEAL_PAGE; 3070 } 3071 3072 /* 3073 * If we have a vpage pointer, see if it indicates that we have 3074 * ``locked'' the page we map -- if so, tell anon_private to 3075 * transfer the locking resource to the new page. 3076 * 3077 * See Statement at the beginning of segvn_lockop regarding 3078 * the way lockcnts/cowcnts are handled during COW. 3079 * 3080 */ 3081 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 3082 pageflags |= LOCK_PAGE; 3083 3084 /* 3085 * Allocate a private page and perform the copy. 3086 * For MAP_NORESERVE reserve swap space now, unless this 3087 * is a cow fault on an existing anon page in which case 3088 * MAP_NORESERVE will have made advance reservations. 3089 */ 3090 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3091 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3092 atomic_add_long(&svd->swresv, ptob(1)); 3093 } else { 3094 page_unlock(opp); 3095 err = ENOMEM; 3096 goto out; 3097 } 3098 } 3099 oldap = ap; 3100 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3101 if (pp == NULL) { 3102 err = ENOMEM; /* out of swap space */ 3103 goto out; 3104 } 3105 3106 /* 3107 * If we copied away from an anonymous page, then 3108 * we are one step closer to freeing up an anon slot. 3109 * 3110 * NOTE: The original anon slot must be released while 3111 * holding the "anon_map" lock. This is necessary to prevent 3112 * other threads from obtaining a pointer to the anon slot 3113 * which may be freed if its "refcnt" is 1. 3114 */ 3115 if (oldap != NULL) 3116 anon_decref(oldap); 3117 3118 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3119 3120 /* 3121 * Handle pages that have been marked for migration 3122 */ 3123 if (lgrp_optimizations()) 3124 page_migrate(seg, addr, &pp, 1); 3125 3126 ASSERT(pp->p_szc == 0); 3127 if (type == F_SOFTLOCK && svd->vp == NULL) { 3128 if (!segvn_slock_anonpages(pp, first)) { 3129 page_unlock(pp); 3130 err = ENOMEM; 3131 goto out; 3132 } else { 3133 mutex_enter(&freemem_lock); 3134 svd->softlockcnt++; 3135 segvn_pages_locked++; 3136 mutex_exit(&freemem_lock); 3137 } 3138 } 3139 3140 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3141 if (enable_mbit_wa) { 3142 if (rw == S_WRITE) 3143 hat_setmod(pp); 3144 else if (!hat_ismod(pp)) 3145 prot &= ~PROT_WRITE; 3146 } 3147 3148 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3149 hat_memload(hat, addr, pp, prot, hat_flag); 3150 3151 if (!(hat_flag & HAT_LOAD_LOCK)) 3152 page_unlock(pp); 3153 3154 ASSERT(anon_lock); 3155 anon_array_exit(&cookie); 3156 return (0); 3157 out: 3158 if (anon_lock) 3159 anon_array_exit(&cookie); 3160 3161 if (type == F_SOFTLOCK && svd->vp != NULL) { 3162 mutex_enter(&freemem_lock); 3163 availrmem++; 3164 segvn_pages_locked--; 3165 svd->softlockcnt--; 3166 mutex_exit(&freemem_lock); 3167 } 3168 return (FC_MAKE_ERR(err)); 3169 } 3170 3171 /* 3172 * relocate a bunch of smaller targ pages into one large repl page. all targ 3173 * pages must be complete pages smaller than replacement pages. 3174 * it's assumed that no page's szc can change since they are all PAGESIZE or 3175 * complete large pages locked SHARED. 3176 */ 3177 static void 3178 segvn_relocate_pages(page_t **targ, page_t *replacement) 3179 { 3180 page_t *pp; 3181 pgcnt_t repl_npgs, curnpgs; 3182 pgcnt_t i; 3183 uint_t repl_szc = replacement->p_szc; 3184 page_t *first_repl = replacement; 3185 page_t *repl; 3186 spgcnt_t npgs; 3187 3188 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3189 3190 ASSERT(repl_szc != 0); 3191 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3192 3193 i = 0; 3194 while (repl_npgs) { 3195 spgcnt_t nreloc; 3196 int err; 3197 ASSERT(replacement != NULL); 3198 pp = targ[i]; 3199 ASSERT(pp->p_szc < repl_szc); 3200 ASSERT(PAGE_EXCL(pp)); 3201 ASSERT(!PP_ISFREE(pp)); 3202 curnpgs = page_get_pagecnt(pp->p_szc); 3203 if (curnpgs == 1) { 3204 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3205 repl = replacement; 3206 page_sub(&replacement, repl); 3207 ASSERT(PAGE_EXCL(repl)); 3208 ASSERT(!PP_ISFREE(repl)); 3209 ASSERT(repl->p_szc == repl_szc); 3210 } else { 3211 page_t *repl_savepp; 3212 int j; 3213 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3214 repl_savepp = replacement; 3215 for (j = 0; j < curnpgs; j++) { 3216 repl = replacement; 3217 page_sub(&replacement, repl); 3218 ASSERT(PAGE_EXCL(repl)); 3219 ASSERT(!PP_ISFREE(repl)); 3220 ASSERT(repl->p_szc == repl_szc); 3221 ASSERT(page_pptonum(targ[i + j]) == 3222 page_pptonum(targ[i]) + j); 3223 } 3224 repl = repl_savepp; 3225 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3226 } 3227 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3228 if (err || nreloc != curnpgs) { 3229 panic("segvn_relocate_pages: " 3230 "page_relocate failed err=%d curnpgs=%ld " 3231 "nreloc=%ld", err, curnpgs, nreloc); 3232 } 3233 ASSERT(curnpgs <= repl_npgs); 3234 repl_npgs -= curnpgs; 3235 i += curnpgs; 3236 } 3237 ASSERT(replacement == NULL); 3238 3239 repl = first_repl; 3240 repl_npgs = npgs; 3241 for (i = 0; i < repl_npgs; i++) { 3242 ASSERT(PAGE_EXCL(repl)); 3243 ASSERT(!PP_ISFREE(repl)); 3244 targ[i] = repl; 3245 page_downgrade(targ[i]); 3246 repl++; 3247 } 3248 } 3249 3250 /* 3251 * Check if all pages in ppa array are complete smaller than szc pages and 3252 * their roots will still be aligned relative to their current size if the 3253 * entire ppa array is relocated into one szc page. If these conditions are 3254 * not met return 0. 3255 * 3256 * If all pages are properly aligned attempt to upgrade their locks 3257 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3258 * upgrdfail was set to 0 by caller. 3259 * 3260 * Return 1 if all pages are aligned and locked exclusively. 3261 * 3262 * If all pages in ppa array happen to be physically contiguous to make one 3263 * szc page and all exclusive locks are successfully obtained promote the page 3264 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 3265 */ 3266 static int 3267 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3268 { 3269 page_t *pp; 3270 pfn_t pfn; 3271 pgcnt_t totnpgs = page_get_pagecnt(szc); 3272 pfn_t first_pfn; 3273 int contig = 1; 3274 pgcnt_t i; 3275 pgcnt_t j; 3276 uint_t curszc; 3277 pgcnt_t curnpgs; 3278 int root = 0; 3279 3280 ASSERT(szc > 0); 3281 3282 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3283 3284 for (i = 0; i < totnpgs; i++) { 3285 pp = ppa[i]; 3286 ASSERT(PAGE_SHARED(pp)); 3287 ASSERT(!PP_ISFREE(pp)); 3288 pfn = page_pptonum(pp); 3289 if (i == 0) { 3290 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3291 contig = 0; 3292 } else { 3293 first_pfn = pfn; 3294 } 3295 } else if (contig && pfn != first_pfn + i) { 3296 contig = 0; 3297 } 3298 if (pp->p_szc == 0) { 3299 if (root) { 3300 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3301 return (0); 3302 } 3303 } else if (!root) { 3304 if ((curszc = pp->p_szc) >= szc) { 3305 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3306 return (0); 3307 } 3308 if (curszc == 0) { 3309 /* 3310 * p_szc changed means we don't have all pages 3311 * locked. return failure. 3312 */ 3313 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3314 return (0); 3315 } 3316 curnpgs = page_get_pagecnt(curszc); 3317 if (!IS_P2ALIGNED(pfn, curnpgs) || 3318 !IS_P2ALIGNED(i, curnpgs)) { 3319 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3320 return (0); 3321 } 3322 root = 1; 3323 } else { 3324 ASSERT(i > 0); 3325 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3326 if (pp->p_szc != curszc) { 3327 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3328 return (0); 3329 } 3330 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3331 panic("segvn_full_szcpages: " 3332 "large page not physically contiguous"); 3333 } 3334 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3335 root = 0; 3336 } 3337 } 3338 } 3339 3340 for (i = 0; i < totnpgs; i++) { 3341 ASSERT(ppa[i]->p_szc < szc); 3342 if (!page_tryupgrade(ppa[i])) { 3343 for (j = 0; j < i; j++) { 3344 page_downgrade(ppa[j]); 3345 } 3346 *pszc = ppa[i]->p_szc; 3347 *upgrdfail = 1; 3348 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3349 return (0); 3350 } 3351 } 3352 3353 /* 3354 * When a page is put a free cachelist its szc is set to 0. if file 3355 * system reclaimed pages from cachelist targ pages will be physically 3356 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3357 * pages without any relocations. 3358 * To avoid any hat issues with previous small mappings 3359 * hat_pageunload() the target pages first. 3360 */ 3361 if (contig) { 3362 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3363 for (i = 0; i < totnpgs; i++) { 3364 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3365 } 3366 for (i = 0; i < totnpgs; i++) { 3367 ppa[i]->p_szc = szc; 3368 } 3369 for (i = 0; i < totnpgs; i++) { 3370 ASSERT(PAGE_EXCL(ppa[i])); 3371 page_downgrade(ppa[i]); 3372 } 3373 if (pszc != NULL) { 3374 *pszc = szc; 3375 } 3376 } 3377 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3378 return (1); 3379 } 3380 3381 /* 3382 * Create physically contiguous pages for [vp, off] - [vp, off + 3383 * page_size(szc)) range and for private segment return them in ppa array. 3384 * Pages are created either via IO or relocations. 3385 * 3386 * Return 1 on success and 0 on failure. 3387 * 3388 * If physically contiguous pages already exist for this range return 1 without 3389 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3390 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 3391 */ 3392 3393 static int 3394 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3395 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3396 int *downsize) 3397 3398 { 3399 page_t *pplist = *ppplist; 3400 size_t pgsz = page_get_pagesize(szc); 3401 pgcnt_t pages = btop(pgsz); 3402 ulong_t start_off = off; 3403 u_offset_t eoff = off + pgsz; 3404 spgcnt_t nreloc; 3405 u_offset_t io_off = off; 3406 size_t io_len; 3407 page_t *io_pplist = NULL; 3408 page_t *done_pplist = NULL; 3409 pgcnt_t pgidx = 0; 3410 page_t *pp; 3411 page_t *newpp; 3412 page_t *targpp; 3413 int io_err = 0; 3414 int i; 3415 pfn_t pfn; 3416 ulong_t ppages; 3417 page_t *targ_pplist = NULL; 3418 page_t *repl_pplist = NULL; 3419 page_t *tmp_pplist; 3420 int nios = 0; 3421 uint_t pszc; 3422 struct vattr va; 3423 3424 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3425 3426 ASSERT(szc != 0); 3427 ASSERT(pplist->p_szc == szc); 3428 3429 /* 3430 * downsize will be set to 1 only if we fail to lock pages. this will 3431 * allow subsequent faults to try to relocate the page again. If we 3432 * fail due to misalignment don't downsize and let the caller map the 3433 * whole region with small mappings to avoid more faults into the area 3434 * where we can't get large pages anyway. 3435 */ 3436 *downsize = 0; 3437 3438 while (off < eoff) { 3439 newpp = pplist; 3440 ASSERT(newpp != NULL); 3441 ASSERT(PAGE_EXCL(newpp)); 3442 ASSERT(!PP_ISFREE(newpp)); 3443 /* 3444 * we pass NULL for nrelocp to page_lookup_create() 3445 * so that it doesn't relocate. We relocate here 3446 * later only after we make sure we can lock all 3447 * pages in the range we handle and they are all 3448 * aligned. 3449 */ 3450 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3451 ASSERT(pp != NULL); 3452 ASSERT(!PP_ISFREE(pp)); 3453 ASSERT(pp->p_vnode == vp); 3454 ASSERT(pp->p_offset == off); 3455 if (pp == newpp) { 3456 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3457 page_sub(&pplist, pp); 3458 ASSERT(PAGE_EXCL(pp)); 3459 ASSERT(page_iolock_assert(pp)); 3460 page_list_concat(&io_pplist, &pp); 3461 off += PAGESIZE; 3462 continue; 3463 } 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3465 pfn = page_pptonum(pp); 3466 pszc = pp->p_szc; 3467 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3468 IS_P2ALIGNED(pfn, pages)) { 3469 ASSERT(repl_pplist == NULL); 3470 ASSERT(done_pplist == NULL); 3471 ASSERT(pplist == *ppplist); 3472 page_unlock(pp); 3473 page_free_replacement_page(pplist); 3474 page_create_putback(pages); 3475 *ppplist = NULL; 3476 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3477 return (1); 3478 } 3479 if (pszc >= szc) { 3480 page_unlock(pp); 3481 segvn_faultvnmpss_align_err1++; 3482 goto out; 3483 } 3484 ppages = page_get_pagecnt(pszc); 3485 if (!IS_P2ALIGNED(pfn, ppages)) { 3486 ASSERT(pszc > 0); 3487 /* 3488 * sizing down to pszc won't help. 3489 */ 3490 page_unlock(pp); 3491 segvn_faultvnmpss_align_err2++; 3492 goto out; 3493 } 3494 pfn = page_pptonum(newpp); 3495 if (!IS_P2ALIGNED(pfn, ppages)) { 3496 ASSERT(pszc > 0); 3497 /* 3498 * sizing down to pszc won't help. 3499 */ 3500 page_unlock(pp); 3501 segvn_faultvnmpss_align_err3++; 3502 goto out; 3503 } 3504 if (!PAGE_EXCL(pp)) { 3505 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3506 page_unlock(pp); 3507 *downsize = 1; 3508 *ret_pszc = pp->p_szc; 3509 goto out; 3510 } 3511 targpp = pp; 3512 if (io_pplist != NULL) { 3513 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3514 io_len = off - io_off; 3515 /* 3516 * Some file systems like NFS don't check EOF 3517 * conditions in VOP_PAGEIO(). Check it here 3518 * now that pages are locked SE_EXCL. Any file 3519 * truncation will wait until the pages are 3520 * unlocked so no need to worry that file will 3521 * be truncated after we check its size here. 3522 * XXX fix NFS to remove this check. 3523 */ 3524 va.va_mask = AT_SIZE; 3525 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3526 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3527 page_unlock(targpp); 3528 goto out; 3529 } 3530 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3531 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3532 *downsize = 1; 3533 *ret_pszc = 0; 3534 page_unlock(targpp); 3535 goto out; 3536 } 3537 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3538 B_READ, svd->cred, NULL); 3539 if (io_err) { 3540 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3541 page_unlock(targpp); 3542 if (io_err == EDEADLK) { 3543 segvn_vmpss_pageio_deadlk_err++; 3544 } 3545 goto out; 3546 } 3547 nios++; 3548 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3549 while (io_pplist != NULL) { 3550 pp = io_pplist; 3551 page_sub(&io_pplist, pp); 3552 ASSERT(page_iolock_assert(pp)); 3553 page_io_unlock(pp); 3554 pgidx = (pp->p_offset - start_off) >> 3555 PAGESHIFT; 3556 ASSERT(pgidx < pages); 3557 ppa[pgidx] = pp; 3558 page_list_concat(&done_pplist, &pp); 3559 } 3560 } 3561 pp = targpp; 3562 ASSERT(PAGE_EXCL(pp)); 3563 ASSERT(pp->p_szc <= pszc); 3564 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3565 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3566 page_unlock(pp); 3567 *downsize = 1; 3568 *ret_pszc = pp->p_szc; 3569 goto out; 3570 } 3571 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3572 /* 3573 * page szc chould have changed before the entire group was 3574 * locked. reread page szc. 3575 */ 3576 pszc = pp->p_szc; 3577 ppages = page_get_pagecnt(pszc); 3578 3579 /* link just the roots */ 3580 page_list_concat(&targ_pplist, &pp); 3581 page_sub(&pplist, newpp); 3582 page_list_concat(&repl_pplist, &newpp); 3583 off += PAGESIZE; 3584 while (--ppages != 0) { 3585 newpp = pplist; 3586 page_sub(&pplist, newpp); 3587 off += PAGESIZE; 3588 } 3589 io_off = off; 3590 } 3591 if (io_pplist != NULL) { 3592 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3593 io_len = eoff - io_off; 3594 va.va_mask = AT_SIZE; 3595 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3596 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3597 goto out; 3598 } 3599 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3600 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3601 *downsize = 1; 3602 *ret_pszc = 0; 3603 goto out; 3604 } 3605 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3606 B_READ, svd->cred, NULL); 3607 if (io_err) { 3608 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3609 if (io_err == EDEADLK) { 3610 segvn_vmpss_pageio_deadlk_err++; 3611 } 3612 goto out; 3613 } 3614 nios++; 3615 while (io_pplist != NULL) { 3616 pp = io_pplist; 3617 page_sub(&io_pplist, pp); 3618 ASSERT(page_iolock_assert(pp)); 3619 page_io_unlock(pp); 3620 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3621 ASSERT(pgidx < pages); 3622 ppa[pgidx] = pp; 3623 } 3624 } 3625 /* 3626 * we're now bound to succeed or panic. 3627 * remove pages from done_pplist. it's not needed anymore. 3628 */ 3629 while (done_pplist != NULL) { 3630 pp = done_pplist; 3631 page_sub(&done_pplist, pp); 3632 } 3633 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3634 ASSERT(pplist == NULL); 3635 *ppplist = NULL; 3636 while (targ_pplist != NULL) { 3637 int ret; 3638 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3639 ASSERT(repl_pplist); 3640 pp = targ_pplist; 3641 page_sub(&targ_pplist, pp); 3642 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3643 newpp = repl_pplist; 3644 page_sub(&repl_pplist, newpp); 3645 #ifdef DEBUG 3646 pfn = page_pptonum(pp); 3647 pszc = pp->p_szc; 3648 ppages = page_get_pagecnt(pszc); 3649 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3650 pfn = page_pptonum(newpp); 3651 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3652 ASSERT(P2PHASE(pfn, pages) == pgidx); 3653 #endif 3654 nreloc = 0; 3655 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3656 if (ret != 0 || nreloc == 0) { 3657 panic("segvn_fill_vp_pages: " 3658 "page_relocate failed"); 3659 } 3660 pp = newpp; 3661 while (nreloc-- != 0) { 3662 ASSERT(PAGE_EXCL(pp)); 3663 ASSERT(pp->p_vnode == vp); 3664 ASSERT(pgidx == 3665 ((pp->p_offset - start_off) >> PAGESHIFT)); 3666 ppa[pgidx++] = pp; 3667 pp++; 3668 } 3669 } 3670 3671 if (svd->type == MAP_PRIVATE) { 3672 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3673 for (i = 0; i < pages; i++) { 3674 ASSERT(ppa[i] != NULL); 3675 ASSERT(PAGE_EXCL(ppa[i])); 3676 ASSERT(ppa[i]->p_vnode == vp); 3677 ASSERT(ppa[i]->p_offset == 3678 start_off + (i << PAGESHIFT)); 3679 page_downgrade(ppa[i]); 3680 } 3681 ppa[pages] = NULL; 3682 } else { 3683 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3684 /* 3685 * the caller will still call VOP_GETPAGE() for shared segments 3686 * to check FS write permissions. For private segments we map 3687 * file read only anyway. so no VOP_GETPAGE is needed. 3688 */ 3689 for (i = 0; i < pages; i++) { 3690 ASSERT(ppa[i] != NULL); 3691 ASSERT(PAGE_EXCL(ppa[i])); 3692 ASSERT(ppa[i]->p_vnode == vp); 3693 ASSERT(ppa[i]->p_offset == 3694 start_off + (i << PAGESHIFT)); 3695 page_unlock(ppa[i]); 3696 } 3697 ppa[0] = NULL; 3698 } 3699 3700 return (1); 3701 out: 3702 /* 3703 * Do the cleanup. Unlock target pages we didn't relocate. They are 3704 * linked on targ_pplist by root pages. reassemble unused replacement 3705 * and io pages back to pplist. 3706 */ 3707 if (io_pplist != NULL) { 3708 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3709 pp = io_pplist; 3710 do { 3711 ASSERT(pp->p_vnode == vp); 3712 ASSERT(pp->p_offset == io_off); 3713 ASSERT(page_iolock_assert(pp)); 3714 page_io_unlock(pp); 3715 page_hashout(pp, NULL); 3716 io_off += PAGESIZE; 3717 } while ((pp = pp->p_next) != io_pplist); 3718 page_list_concat(&io_pplist, &pplist); 3719 pplist = io_pplist; 3720 } 3721 tmp_pplist = NULL; 3722 while (targ_pplist != NULL) { 3723 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3724 pp = targ_pplist; 3725 ASSERT(PAGE_EXCL(pp)); 3726 page_sub(&targ_pplist, pp); 3727 3728 pszc = pp->p_szc; 3729 ppages = page_get_pagecnt(pszc); 3730 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3731 3732 if (pszc != 0) { 3733 group_page_unlock(pp); 3734 } 3735 page_unlock(pp); 3736 3737 pp = repl_pplist; 3738 ASSERT(pp != NULL); 3739 ASSERT(PAGE_EXCL(pp)); 3740 ASSERT(pp->p_szc == szc); 3741 page_sub(&repl_pplist, pp); 3742 3743 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3744 3745 /* relink replacement page */ 3746 page_list_concat(&tmp_pplist, &pp); 3747 while (--ppages != 0) { 3748 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3749 pp++; 3750 ASSERT(PAGE_EXCL(pp)); 3751 ASSERT(pp->p_szc == szc); 3752 page_list_concat(&tmp_pplist, &pp); 3753 } 3754 } 3755 if (tmp_pplist != NULL) { 3756 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3757 page_list_concat(&tmp_pplist, &pplist); 3758 pplist = tmp_pplist; 3759 } 3760 /* 3761 * at this point all pages are either on done_pplist or 3762 * pplist. They can't be all on done_pplist otherwise 3763 * we'd've been done. 3764 */ 3765 ASSERT(pplist != NULL); 3766 if (nios != 0) { 3767 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3768 pp = pplist; 3769 do { 3770 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3771 ASSERT(pp->p_szc == szc); 3772 ASSERT(PAGE_EXCL(pp)); 3773 ASSERT(pp->p_vnode != vp); 3774 pp->p_szc = 0; 3775 } while ((pp = pp->p_next) != pplist); 3776 3777 pp = done_pplist; 3778 do { 3779 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3780 ASSERT(pp->p_szc == szc); 3781 ASSERT(PAGE_EXCL(pp)); 3782 ASSERT(pp->p_vnode == vp); 3783 pp->p_szc = 0; 3784 } while ((pp = pp->p_next) != done_pplist); 3785 3786 while (pplist != NULL) { 3787 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3788 pp = pplist; 3789 page_sub(&pplist, pp); 3790 page_free(pp, 0); 3791 } 3792 3793 while (done_pplist != NULL) { 3794 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3795 pp = done_pplist; 3796 page_sub(&done_pplist, pp); 3797 page_unlock(pp); 3798 } 3799 *ppplist = NULL; 3800 return (0); 3801 } 3802 ASSERT(pplist == *ppplist); 3803 if (io_err) { 3804 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3805 /* 3806 * don't downsize on io error. 3807 * see if vop_getpage succeeds. 3808 * pplist may still be used in this case 3809 * for relocations. 3810 */ 3811 return (0); 3812 } 3813 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3814 page_free_replacement_page(pplist); 3815 page_create_putback(pages); 3816 *ppplist = NULL; 3817 return (0); 3818 } 3819 3820 int segvn_anypgsz = 0; 3821 3822 #define SEGVN_RESTORE_SOFTLOCK(type, pages) \ 3823 if ((type) == F_SOFTLOCK) { \ 3824 mutex_enter(&freemem_lock); \ 3825 availrmem += (pages); \ 3826 segvn_pages_locked -= (pages); \ 3827 svd->softlockcnt -= (pages); \ 3828 mutex_exit(&freemem_lock); \ 3829 } 3830 3831 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3832 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3833 if ((rw) == S_WRITE) { \ 3834 for (i = 0; i < (pages); i++) { \ 3835 ASSERT((ppa)[i]->p_vnode == \ 3836 (ppa)[0]->p_vnode); \ 3837 hat_setmod((ppa)[i]); \ 3838 } \ 3839 } else if ((rw) != S_OTHER && \ 3840 ((prot) & (vpprot) & PROT_WRITE)) { \ 3841 for (i = 0; i < (pages); i++) { \ 3842 ASSERT((ppa)[i]->p_vnode == \ 3843 (ppa)[0]->p_vnode); \ 3844 if (!hat_ismod((ppa)[i])) { \ 3845 prot &= ~PROT_WRITE; \ 3846 break; \ 3847 } \ 3848 } \ 3849 } \ 3850 } 3851 3852 #ifdef VM_STATS 3853 3854 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3855 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3856 3857 #else /* VM_STATS */ 3858 3859 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3860 3861 #endif 3862 3863 static faultcode_t 3864 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3865 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3866 caddr_t eaddr, int brkcow) 3867 { 3868 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3869 struct anon_map *amp = svd->amp; 3870 uchar_t segtype = svd->type; 3871 uint_t szc = seg->s_szc; 3872 size_t pgsz = page_get_pagesize(szc); 3873 size_t maxpgsz = pgsz; 3874 pgcnt_t pages = btop(pgsz); 3875 pgcnt_t maxpages = pages; 3876 size_t ppasize = (pages + 1) * sizeof (page_t *); 3877 caddr_t a = lpgaddr; 3878 caddr_t maxlpgeaddr = lpgeaddr; 3879 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3880 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3881 struct vpage *vpage = (svd->vpage != NULL) ? 3882 &svd->vpage[seg_page(seg, a)] : NULL; 3883 vnode_t *vp = svd->vp; 3884 page_t **ppa; 3885 uint_t pszc; 3886 size_t ppgsz; 3887 pgcnt_t ppages; 3888 faultcode_t err = 0; 3889 int ierr; 3890 int vop_size_err = 0; 3891 uint_t protchk, prot, vpprot; 3892 ulong_t i; 3893 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 3894 anon_sync_obj_t an_cookie; 3895 enum seg_rw arw; 3896 int alloc_failed = 0; 3897 int adjszc_chk; 3898 struct vattr va; 3899 int xhat = 0; 3900 page_t *pplist; 3901 pfn_t pfn; 3902 int physcontig; 3903 int upgrdfail; 3904 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3905 int tron = (svd->tr_state == SEGVN_TR_ON); 3906 3907 ASSERT(szc != 0); 3908 ASSERT(vp != NULL); 3909 ASSERT(brkcow == 0 || amp != NULL); 3910 ASSERT(tron == 0 || amp != NULL); 3911 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3912 ASSERT(!(svd->flags & MAP_NORESERVE)); 3913 ASSERT(type != F_SOFTUNLOCK); 3914 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3915 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3916 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3917 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3918 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3919 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3920 3921 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3922 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3923 3924 if (svd->flags & MAP_TEXT) { 3925 hat_flag |= HAT_LOAD_TEXT; 3926 } 3927 3928 if (svd->pageprot) { 3929 switch (rw) { 3930 case S_READ: 3931 protchk = PROT_READ; 3932 break; 3933 case S_WRITE: 3934 protchk = PROT_WRITE; 3935 break; 3936 case S_EXEC: 3937 protchk = PROT_EXEC; 3938 break; 3939 case S_OTHER: 3940 default: 3941 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3942 break; 3943 } 3944 } else { 3945 prot = svd->prot; 3946 /* caller has already done segment level protection check. */ 3947 } 3948 3949 if (seg->s_as->a_hat != hat) { 3950 xhat = 1; 3951 } 3952 3953 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3954 SEGVN_VMSTAT_FLTVNPAGES(2); 3955 arw = S_READ; 3956 } else { 3957 arw = rw; 3958 } 3959 3960 ppa = kmem_alloc(ppasize, KM_SLEEP); 3961 3962 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3963 3964 for (;;) { 3965 adjszc_chk = 0; 3966 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3967 if (adjszc_chk) { 3968 while (szc < seg->s_szc) { 3969 uintptr_t e; 3970 uint_t tszc; 3971 tszc = segvn_anypgsz_vnode ? szc + 1 : 3972 seg->s_szc; 3973 ppgsz = page_get_pagesize(tszc); 3974 if (!IS_P2ALIGNED(a, ppgsz) || 3975 ((alloc_failed >> tszc) & 0x1)) { 3976 break; 3977 } 3978 SEGVN_VMSTAT_FLTVNPAGES(4); 3979 szc = tszc; 3980 pgsz = ppgsz; 3981 pages = btop(pgsz); 3982 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3983 lpgeaddr = (caddr_t)e; 3984 } 3985 } 3986 3987 again: 3988 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3989 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3990 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 3991 anon_array_enter(amp, aindx, &an_cookie); 3992 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3993 SEGVN_VMSTAT_FLTVNPAGES(5); 3994 ASSERT(anon_pages(amp->ahp, aindx, 3995 maxpages) == maxpages); 3996 anon_array_exit(&an_cookie); 3997 ANON_LOCK_EXIT(&->a_rwlock); 3998 err = segvn_fault_anonpages(hat, seg, 3999 a, a + maxpgsz, type, rw, 4000 MAX(a, addr), 4001 MIN(a + maxpgsz, eaddr), brkcow); 4002 if (err != 0) { 4003 SEGVN_VMSTAT_FLTVNPAGES(6); 4004 goto out; 4005 } 4006 if (szc < seg->s_szc) { 4007 szc = seg->s_szc; 4008 pgsz = maxpgsz; 4009 pages = maxpages; 4010 lpgeaddr = maxlpgeaddr; 4011 } 4012 goto next; 4013 } else { 4014 ASSERT(anon_pages(amp->ahp, aindx, 4015 maxpages) == 0); 4016 SEGVN_VMSTAT_FLTVNPAGES(7); 4017 anon_array_exit(&an_cookie); 4018 ANON_LOCK_EXIT(&->a_rwlock); 4019 } 4020 } 4021 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 4022 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 4023 4024 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4025 ASSERT(vpage != NULL); 4026 prot = VPP_PROT(vpage); 4027 ASSERT(sameprot(seg, a, maxpgsz)); 4028 if ((prot & protchk) == 0) { 4029 SEGVN_VMSTAT_FLTVNPAGES(8); 4030 err = FC_PROT; 4031 goto out; 4032 } 4033 } 4034 if (type == F_SOFTLOCK) { 4035 mutex_enter(&freemem_lock); 4036 if (availrmem < tune.t_minarmem + pages) { 4037 mutex_exit(&freemem_lock); 4038 err = FC_MAKE_ERR(ENOMEM); 4039 goto out; 4040 } else { 4041 availrmem -= pages; 4042 segvn_pages_locked += pages; 4043 svd->softlockcnt += pages; 4044 } 4045 mutex_exit(&freemem_lock); 4046 } 4047 4048 pplist = NULL; 4049 physcontig = 0; 4050 ppa[0] = NULL; 4051 if (!brkcow && !tron && szc && 4052 !page_exists_physcontig(vp, off, szc, 4053 segtype == MAP_PRIVATE ? ppa : NULL)) { 4054 SEGVN_VMSTAT_FLTVNPAGES(9); 4055 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 4056 szc, 0, 0) && type != F_SOFTLOCK) { 4057 SEGVN_VMSTAT_FLTVNPAGES(10); 4058 pszc = 0; 4059 ierr = -1; 4060 alloc_failed |= (1 << szc); 4061 break; 4062 } 4063 if (pplist != NULL && 4064 vp->v_mpssdata == SEGVN_PAGEIO) { 4065 int downsize; 4066 SEGVN_VMSTAT_FLTVNPAGES(11); 4067 physcontig = segvn_fill_vp_pages(svd, 4068 vp, off, szc, ppa, &pplist, 4069 &pszc, &downsize); 4070 ASSERT(!physcontig || pplist == NULL); 4071 if (!physcontig && downsize && 4072 type != F_SOFTLOCK) { 4073 ASSERT(pplist == NULL); 4074 SEGVN_VMSTAT_FLTVNPAGES(12); 4075 ierr = -1; 4076 break; 4077 } 4078 ASSERT(!physcontig || 4079 segtype == MAP_PRIVATE || 4080 ppa[0] == NULL); 4081 if (physcontig && ppa[0] == NULL) { 4082 physcontig = 0; 4083 } 4084 } 4085 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 4086 SEGVN_VMSTAT_FLTVNPAGES(13); 4087 ASSERT(segtype == MAP_PRIVATE); 4088 physcontig = 1; 4089 } 4090 4091 if (!physcontig) { 4092 SEGVN_VMSTAT_FLTVNPAGES(14); 4093 ppa[0] = NULL; 4094 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 4095 &vpprot, ppa, pgsz, seg, a, arw, 4096 svd->cred, NULL); 4097 #ifdef DEBUG 4098 if (ierr == 0) { 4099 for (i = 0; i < pages; i++) { 4100 ASSERT(PAGE_LOCKED(ppa[i])); 4101 ASSERT(!PP_ISFREE(ppa[i])); 4102 ASSERT(ppa[i]->p_vnode == vp); 4103 ASSERT(ppa[i]->p_offset == 4104 off + (i << PAGESHIFT)); 4105 } 4106 } 4107 #endif /* DEBUG */ 4108 if (segtype == MAP_PRIVATE) { 4109 SEGVN_VMSTAT_FLTVNPAGES(15); 4110 vpprot &= ~PROT_WRITE; 4111 } 4112 } else { 4113 ASSERT(segtype == MAP_PRIVATE); 4114 SEGVN_VMSTAT_FLTVNPAGES(16); 4115 vpprot = PROT_ALL & ~PROT_WRITE; 4116 ierr = 0; 4117 } 4118 4119 if (ierr != 0) { 4120 SEGVN_VMSTAT_FLTVNPAGES(17); 4121 if (pplist != NULL) { 4122 SEGVN_VMSTAT_FLTVNPAGES(18); 4123 page_free_replacement_page(pplist); 4124 page_create_putback(pages); 4125 } 4126 SEGVN_RESTORE_SOFTLOCK(type, pages); 4127 if (a + pgsz <= eaddr) { 4128 SEGVN_VMSTAT_FLTVNPAGES(19); 4129 err = FC_MAKE_ERR(ierr); 4130 goto out; 4131 } 4132 va.va_mask = AT_SIZE; 4133 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4134 SEGVN_VMSTAT_FLTVNPAGES(20); 4135 err = FC_MAKE_ERR(EIO); 4136 goto out; 4137 } 4138 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4139 SEGVN_VMSTAT_FLTVNPAGES(21); 4140 err = FC_MAKE_ERR(ierr); 4141 goto out; 4142 } 4143 if (btopr(va.va_size) < 4144 btopr(off + (eaddr - a))) { 4145 SEGVN_VMSTAT_FLTVNPAGES(22); 4146 err = FC_MAKE_ERR(ierr); 4147 goto out; 4148 } 4149 if (brkcow || tron || type == F_SOFTLOCK) { 4150 /* can't reduce map area */ 4151 SEGVN_VMSTAT_FLTVNPAGES(23); 4152 vop_size_err = 1; 4153 goto out; 4154 } 4155 SEGVN_VMSTAT_FLTVNPAGES(24); 4156 ASSERT(szc != 0); 4157 pszc = 0; 4158 ierr = -1; 4159 break; 4160 } 4161 4162 if (amp != NULL) { 4163 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4164 anon_array_enter(amp, aindx, &an_cookie); 4165 } 4166 if (amp != NULL && 4167 anon_get_ptr(amp->ahp, aindx) != NULL) { 4168 ulong_t taindx = P2ALIGN(aindx, maxpages); 4169 4170 SEGVN_VMSTAT_FLTVNPAGES(25); 4171 ASSERT(anon_pages(amp->ahp, taindx, 4172 maxpages) == maxpages); 4173 for (i = 0; i < pages; i++) { 4174 page_unlock(ppa[i]); 4175 } 4176 anon_array_exit(&an_cookie); 4177 ANON_LOCK_EXIT(&->a_rwlock); 4178 if (pplist != NULL) { 4179 page_free_replacement_page(pplist); 4180 page_create_putback(pages); 4181 } 4182 SEGVN_RESTORE_SOFTLOCK(type, pages); 4183 if (szc < seg->s_szc) { 4184 SEGVN_VMSTAT_FLTVNPAGES(26); 4185 /* 4186 * For private segments SOFTLOCK 4187 * either always breaks cow (any rw 4188 * type except S_READ_NOCOW) or 4189 * address space is locked as writer 4190 * (S_READ_NOCOW case) and anon slots 4191 * can't show up on second check. 4192 * Therefore if we are here for 4193 * SOFTLOCK case it must be a cow 4194 * break but cow break never reduces 4195 * szc. text replication (tron) in 4196 * this case works as cow break. 4197 * Thus the assert below. 4198 */ 4199 ASSERT(!brkcow && !tron && 4200 type != F_SOFTLOCK); 4201 pszc = seg->s_szc; 4202 ierr = -2; 4203 break; 4204 } 4205 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4206 goto again; 4207 } 4208 #ifdef DEBUG 4209 if (amp != NULL) { 4210 ulong_t taindx = P2ALIGN(aindx, maxpages); 4211 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4212 } 4213 #endif /* DEBUG */ 4214 4215 if (brkcow || tron) { 4216 ASSERT(amp != NULL); 4217 ASSERT(pplist == NULL); 4218 ASSERT(szc == seg->s_szc); 4219 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4220 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4221 SEGVN_VMSTAT_FLTVNPAGES(27); 4222 ierr = anon_map_privatepages(amp, aindx, szc, 4223 seg, a, prot, ppa, vpage, segvn_anypgsz, 4224 tron ? PG_LOCAL : 0, svd->cred); 4225 if (ierr != 0) { 4226 SEGVN_VMSTAT_FLTVNPAGES(28); 4227 anon_array_exit(&an_cookie); 4228 ANON_LOCK_EXIT(&->a_rwlock); 4229 SEGVN_RESTORE_SOFTLOCK(type, pages); 4230 err = FC_MAKE_ERR(ierr); 4231 goto out; 4232 } 4233 4234 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4235 /* 4236 * p_szc can't be changed for locked 4237 * swapfs pages. 4238 */ 4239 ASSERT(svd->rcookie == 4240 HAT_INVALID_REGION_COOKIE); 4241 hat_memload_array(hat, a, pgsz, ppa, prot, 4242 hat_flag); 4243 4244 if (!(hat_flag & HAT_LOAD_LOCK)) { 4245 SEGVN_VMSTAT_FLTVNPAGES(29); 4246 for (i = 0; i < pages; i++) { 4247 page_unlock(ppa[i]); 4248 } 4249 } 4250 anon_array_exit(&an_cookie); 4251 ANON_LOCK_EXIT(&->a_rwlock); 4252 goto next; 4253 } 4254 4255 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4256 (!svd->pageprot && svd->prot == (prot & vpprot))); 4257 4258 pfn = page_pptonum(ppa[0]); 4259 /* 4260 * hat_page_demote() needs an SE_EXCL lock on one of 4261 * constituent page_t's and it decreases root's p_szc 4262 * last. This means if root's p_szc is equal szc and 4263 * all its constituent pages are locked 4264 * hat_page_demote() that could have changed p_szc to 4265 * szc is already done and no new have page_demote() 4266 * can start for this large page. 4267 */ 4268 4269 /* 4270 * we need to make sure same mapping size is used for 4271 * the same address range if there's a possibility the 4272 * adddress is already mapped because hat layer panics 4273 * when translation is loaded for the range already 4274 * mapped with a different page size. We achieve it 4275 * by always using largest page size possible subject 4276 * to the constraints of page size, segment page size 4277 * and page alignment. Since mappings are invalidated 4278 * when those constraints change and make it 4279 * impossible to use previously used mapping size no 4280 * mapping size conflicts should happen. 4281 */ 4282 4283 chkszc: 4284 if ((pszc = ppa[0]->p_szc) == szc && 4285 IS_P2ALIGNED(pfn, pages)) { 4286 4287 SEGVN_VMSTAT_FLTVNPAGES(30); 4288 #ifdef DEBUG 4289 for (i = 0; i < pages; i++) { 4290 ASSERT(PAGE_LOCKED(ppa[i])); 4291 ASSERT(!PP_ISFREE(ppa[i])); 4292 ASSERT(page_pptonum(ppa[i]) == 4293 pfn + i); 4294 ASSERT(ppa[i]->p_szc == szc); 4295 ASSERT(ppa[i]->p_vnode == vp); 4296 ASSERT(ppa[i]->p_offset == 4297 off + (i << PAGESHIFT)); 4298 } 4299 #endif /* DEBUG */ 4300 /* 4301 * All pages are of szc we need and they are 4302 * all locked so they can't change szc. load 4303 * translations. 4304 * 4305 * if page got promoted since last check 4306 * we don't need pplist. 4307 */ 4308 if (pplist != NULL) { 4309 page_free_replacement_page(pplist); 4310 page_create_putback(pages); 4311 } 4312 if (PP_ISMIGRATE(ppa[0])) { 4313 page_migrate(seg, a, ppa, pages); 4314 } 4315 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4316 prot, vpprot); 4317 if (!xhat) { 4318 hat_memload_array_region(hat, a, pgsz, 4319 ppa, prot & vpprot, hat_flag, 4320 svd->rcookie); 4321 } else { 4322 /* 4323 * avoid large xhat mappings to FS 4324 * pages so that hat_page_demote() 4325 * doesn't need to check for xhat 4326 * large mappings. 4327 * Don't use regions with xhats. 4328 */ 4329 for (i = 0; i < pages; i++) { 4330 hat_memload(hat, 4331 a + (i << PAGESHIFT), 4332 ppa[i], prot & vpprot, 4333 hat_flag); 4334 } 4335 } 4336 4337 if (!(hat_flag & HAT_LOAD_LOCK)) { 4338 for (i = 0; i < pages; i++) { 4339 page_unlock(ppa[i]); 4340 } 4341 } 4342 if (amp != NULL) { 4343 anon_array_exit(&an_cookie); 4344 ANON_LOCK_EXIT(&->a_rwlock); 4345 } 4346 goto next; 4347 } 4348 4349 /* 4350 * See if upsize is possible. 4351 */ 4352 if (pszc > szc && szc < seg->s_szc && 4353 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4354 pgcnt_t aphase; 4355 uint_t pszc1 = MIN(pszc, seg->s_szc); 4356 ppgsz = page_get_pagesize(pszc1); 4357 ppages = btop(ppgsz); 4358 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4359 4360 ASSERT(type != F_SOFTLOCK); 4361 4362 SEGVN_VMSTAT_FLTVNPAGES(31); 4363 if (aphase != P2PHASE(pfn, ppages)) { 4364 segvn_faultvnmpss_align_err4++; 4365 } else { 4366 SEGVN_VMSTAT_FLTVNPAGES(32); 4367 if (pplist != NULL) { 4368 page_t *pl = pplist; 4369 page_free_replacement_page(pl); 4370 page_create_putback(pages); 4371 } 4372 for (i = 0; i < pages; i++) { 4373 page_unlock(ppa[i]); 4374 } 4375 if (amp != NULL) { 4376 anon_array_exit(&an_cookie); 4377 ANON_LOCK_EXIT(&->a_rwlock); 4378 } 4379 pszc = pszc1; 4380 ierr = -2; 4381 break; 4382 } 4383 } 4384 4385 /* 4386 * check if we should use smallest mapping size. 4387 */ 4388 upgrdfail = 0; 4389 if (szc == 0 || xhat || 4390 (pszc >= szc && 4391 !IS_P2ALIGNED(pfn, pages)) || 4392 (pszc < szc && 4393 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4394 &pszc))) { 4395 4396 if (upgrdfail && type != F_SOFTLOCK) { 4397 /* 4398 * segvn_full_szcpages failed to lock 4399 * all pages EXCL. Size down. 4400 */ 4401 ASSERT(pszc < szc); 4402 4403 SEGVN_VMSTAT_FLTVNPAGES(33); 4404 4405 if (pplist != NULL) { 4406 page_t *pl = pplist; 4407 page_free_replacement_page(pl); 4408 page_create_putback(pages); 4409 } 4410 4411 for (i = 0; i < pages; i++) { 4412 page_unlock(ppa[i]); 4413 } 4414 if (amp != NULL) { 4415 anon_array_exit(&an_cookie); 4416 ANON_LOCK_EXIT(&->a_rwlock); 4417 } 4418 ierr = -1; 4419 break; 4420 } 4421 if (szc != 0 && !xhat && !upgrdfail) { 4422 segvn_faultvnmpss_align_err5++; 4423 } 4424 SEGVN_VMSTAT_FLTVNPAGES(34); 4425 if (pplist != NULL) { 4426 page_free_replacement_page(pplist); 4427 page_create_putback(pages); 4428 } 4429 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4430 prot, vpprot); 4431 if (upgrdfail && segvn_anypgsz_vnode) { 4432 /* SOFTLOCK case */ 4433 hat_memload_array_region(hat, a, pgsz, 4434 ppa, prot & vpprot, hat_flag, 4435 svd->rcookie); 4436 } else { 4437 for (i = 0; i < pages; i++) { 4438 hat_memload_region(hat, 4439 a + (i << PAGESHIFT), 4440 ppa[i], prot & vpprot, 4441 hat_flag, svd->rcookie); 4442 } 4443 } 4444 if (!(hat_flag & HAT_LOAD_LOCK)) { 4445 for (i = 0; i < pages; i++) { 4446 page_unlock(ppa[i]); 4447 } 4448 } 4449 if (amp != NULL) { 4450 anon_array_exit(&an_cookie); 4451 ANON_LOCK_EXIT(&->a_rwlock); 4452 } 4453 goto next; 4454 } 4455 4456 if (pszc == szc) { 4457 /* 4458 * segvn_full_szcpages() upgraded pages szc. 4459 */ 4460 ASSERT(pszc == ppa[0]->p_szc); 4461 ASSERT(IS_P2ALIGNED(pfn, pages)); 4462 goto chkszc; 4463 } 4464 4465 if (pszc > szc) { 4466 kmutex_t *szcmtx; 4467 SEGVN_VMSTAT_FLTVNPAGES(35); 4468 /* 4469 * p_szc of ppa[0] can change since we haven't 4470 * locked all constituent pages. Call 4471 * page_lock_szc() to prevent szc changes. 4472 * This should be a rare case that happens when 4473 * multiple segments use a different page size 4474 * to map the same file offsets. 4475 */ 4476 szcmtx = page_szc_lock(ppa[0]); 4477 pszc = ppa[0]->p_szc; 4478 ASSERT(szcmtx != NULL || pszc == 0); 4479 ASSERT(ppa[0]->p_szc <= pszc); 4480 if (pszc <= szc) { 4481 SEGVN_VMSTAT_FLTVNPAGES(36); 4482 if (szcmtx != NULL) { 4483 mutex_exit(szcmtx); 4484 } 4485 goto chkszc; 4486 } 4487 if (pplist != NULL) { 4488 /* 4489 * page got promoted since last check. 4490 * we don't need preaalocated large 4491 * page. 4492 */ 4493 SEGVN_VMSTAT_FLTVNPAGES(37); 4494 page_free_replacement_page(pplist); 4495 page_create_putback(pages); 4496 } 4497 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4498 prot, vpprot); 4499 hat_memload_array_region(hat, a, pgsz, ppa, 4500 prot & vpprot, hat_flag, svd->rcookie); 4501 mutex_exit(szcmtx); 4502 if (!(hat_flag & HAT_LOAD_LOCK)) { 4503 for (i = 0; i < pages; i++) { 4504 page_unlock(ppa[i]); 4505 } 4506 } 4507 if (amp != NULL) { 4508 anon_array_exit(&an_cookie); 4509 ANON_LOCK_EXIT(&->a_rwlock); 4510 } 4511 goto next; 4512 } 4513 4514 /* 4515 * if page got demoted since last check 4516 * we could have not allocated larger page. 4517 * allocate now. 4518 */ 4519 if (pplist == NULL && 4520 page_alloc_pages(vp, seg, a, &pplist, NULL, 4521 szc, 0, 0) && type != F_SOFTLOCK) { 4522 SEGVN_VMSTAT_FLTVNPAGES(38); 4523 for (i = 0; i < pages; i++) { 4524 page_unlock(ppa[i]); 4525 } 4526 if (amp != NULL) { 4527 anon_array_exit(&an_cookie); 4528 ANON_LOCK_EXIT(&->a_rwlock); 4529 } 4530 ierr = -1; 4531 alloc_failed |= (1 << szc); 4532 break; 4533 } 4534 4535 SEGVN_VMSTAT_FLTVNPAGES(39); 4536 4537 if (pplist != NULL) { 4538 segvn_relocate_pages(ppa, pplist); 4539 #ifdef DEBUG 4540 } else { 4541 ASSERT(type == F_SOFTLOCK); 4542 SEGVN_VMSTAT_FLTVNPAGES(40); 4543 #endif /* DEBUG */ 4544 } 4545 4546 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4547 4548 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4549 ASSERT(type == F_SOFTLOCK); 4550 for (i = 0; i < pages; i++) { 4551 ASSERT(ppa[i]->p_szc < szc); 4552 hat_memload_region(hat, 4553 a + (i << PAGESHIFT), 4554 ppa[i], prot & vpprot, hat_flag, 4555 svd->rcookie); 4556 } 4557 } else { 4558 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4559 hat_memload_array_region(hat, a, pgsz, ppa, 4560 prot & vpprot, hat_flag, svd->rcookie); 4561 } 4562 if (!(hat_flag & HAT_LOAD_LOCK)) { 4563 for (i = 0; i < pages; i++) { 4564 ASSERT(PAGE_SHARED(ppa[i])); 4565 page_unlock(ppa[i]); 4566 } 4567 } 4568 if (amp != NULL) { 4569 anon_array_exit(&an_cookie); 4570 ANON_LOCK_EXIT(&->a_rwlock); 4571 } 4572 4573 next: 4574 if (vpage != NULL) { 4575 vpage += pages; 4576 } 4577 adjszc_chk = 1; 4578 } 4579 if (a == lpgeaddr) 4580 break; 4581 ASSERT(a < lpgeaddr); 4582 4583 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4584 4585 /* 4586 * ierr == -1 means we failed to map with a large page. 4587 * (either due to allocation/relocation failures or 4588 * misalignment with other mappings to this file. 4589 * 4590 * ierr == -2 means some other thread allocated a large page 4591 * after we gave up tp map with a large page. retry with 4592 * larger mapping. 4593 */ 4594 ASSERT(ierr == -1 || ierr == -2); 4595 ASSERT(ierr == -2 || szc != 0); 4596 ASSERT(ierr == -1 || szc < seg->s_szc); 4597 if (ierr == -2) { 4598 SEGVN_VMSTAT_FLTVNPAGES(41); 4599 ASSERT(pszc > szc && pszc <= seg->s_szc); 4600 szc = pszc; 4601 } else if (segvn_anypgsz_vnode) { 4602 SEGVN_VMSTAT_FLTVNPAGES(42); 4603 szc--; 4604 } else { 4605 SEGVN_VMSTAT_FLTVNPAGES(43); 4606 ASSERT(pszc < szc); 4607 /* 4608 * other process created pszc large page. 4609 * but we still have to drop to 0 szc. 4610 */ 4611 szc = 0; 4612 } 4613 4614 pgsz = page_get_pagesize(szc); 4615 pages = btop(pgsz); 4616 if (ierr == -2) { 4617 /* 4618 * Size up case. Note lpgaddr may only be needed for 4619 * softlock case so we don't adjust it here. 4620 */ 4621 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4622 ASSERT(a >= lpgaddr); 4623 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4624 off = svd->offset + (uintptr_t)(a - seg->s_base); 4625 aindx = svd->anon_index + seg_page(seg, a); 4626 vpage = (svd->vpage != NULL) ? 4627 &svd->vpage[seg_page(seg, a)] : NULL; 4628 } else { 4629 /* 4630 * Size down case. Note lpgaddr may only be needed for 4631 * softlock case so we don't adjust it here. 4632 */ 4633 ASSERT(IS_P2ALIGNED(a, pgsz)); 4634 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4635 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4636 ASSERT(a < lpgeaddr); 4637 if (a < addr) { 4638 SEGVN_VMSTAT_FLTVNPAGES(44); 4639 /* 4640 * The beginning of the large page region can 4641 * be pulled to the right to make a smaller 4642 * region. We haven't yet faulted a single 4643 * page. 4644 */ 4645 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4646 ASSERT(a >= lpgaddr); 4647 off = svd->offset + 4648 (uintptr_t)(a - seg->s_base); 4649 aindx = svd->anon_index + seg_page(seg, a); 4650 vpage = (svd->vpage != NULL) ? 4651 &svd->vpage[seg_page(seg, a)] : NULL; 4652 } 4653 } 4654 } 4655 out: 4656 kmem_free(ppa, ppasize); 4657 if (!err && !vop_size_err) { 4658 SEGVN_VMSTAT_FLTVNPAGES(45); 4659 return (0); 4660 } 4661 if (type == F_SOFTLOCK && a > lpgaddr) { 4662 SEGVN_VMSTAT_FLTVNPAGES(46); 4663 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4664 } 4665 if (!vop_size_err) { 4666 SEGVN_VMSTAT_FLTVNPAGES(47); 4667 return (err); 4668 } 4669 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4670 /* 4671 * Large page end is mapped beyond the end of file and it's a cow 4672 * fault (can be a text replication induced cow) or softlock so we can't 4673 * reduce the map area. For now just demote the segment. This should 4674 * really only happen if the end of the file changed after the mapping 4675 * was established since when large page segments are created we make 4676 * sure they don't extend beyond the end of the file. 4677 */ 4678 SEGVN_VMSTAT_FLTVNPAGES(48); 4679 4680 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4681 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4682 err = 0; 4683 if (seg->s_szc != 0) { 4684 segvn_fltvnpages_clrszc_cnt++; 4685 ASSERT(svd->softlockcnt == 0); 4686 err = segvn_clrszc(seg); 4687 if (err != 0) { 4688 segvn_fltvnpages_clrszc_err++; 4689 } 4690 } 4691 ASSERT(err || seg->s_szc == 0); 4692 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4693 /* segvn_fault will do its job as if szc had been zero to begin with */ 4694 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4695 } 4696 4697 /* 4698 * This routine will attempt to fault in one large page. 4699 * it will use smaller pages if that fails. 4700 * It should only be called for pure anonymous segments. 4701 */ 4702 static faultcode_t 4703 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4704 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4705 caddr_t eaddr, int brkcow) 4706 { 4707 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4708 struct anon_map *amp = svd->amp; 4709 uchar_t segtype = svd->type; 4710 uint_t szc = seg->s_szc; 4711 size_t pgsz = page_get_pagesize(szc); 4712 size_t maxpgsz = pgsz; 4713 pgcnt_t pages = btop(pgsz); 4714 uint_t ppaszc = szc; 4715 caddr_t a = lpgaddr; 4716 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4717 struct vpage *vpage = (svd->vpage != NULL) ? 4718 &svd->vpage[seg_page(seg, a)] : NULL; 4719 page_t **ppa; 4720 uint_t ppa_szc; 4721 faultcode_t err; 4722 int ierr; 4723 uint_t protchk, prot, vpprot; 4724 ulong_t i; 4725 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4726 anon_sync_obj_t cookie; 4727 int first = 1; 4728 int adjszc_chk; 4729 int purged = 0; 4730 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0; 4731 4732 ASSERT(szc != 0); 4733 ASSERT(amp != NULL); 4734 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4735 ASSERT(!(svd->flags & MAP_NORESERVE)); 4736 ASSERT(type != F_SOFTUNLOCK); 4737 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4738 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4739 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4740 4741 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4742 4743 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4744 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4745 4746 if (svd->flags & MAP_TEXT) { 4747 hat_flag |= HAT_LOAD_TEXT; 4748 } 4749 4750 if (svd->pageprot) { 4751 switch (rw) { 4752 case S_READ: 4753 protchk = PROT_READ; 4754 break; 4755 case S_WRITE: 4756 protchk = PROT_WRITE; 4757 break; 4758 case S_EXEC: 4759 protchk = PROT_EXEC; 4760 break; 4761 case S_OTHER: 4762 default: 4763 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4764 break; 4765 } 4766 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4767 } else { 4768 prot = svd->prot; 4769 /* caller has already done segment level protection check. */ 4770 } 4771 4772 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4773 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4774 for (;;) { 4775 adjszc_chk = 0; 4776 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4777 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4778 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4779 ASSERT(vpage != NULL); 4780 prot = VPP_PROT(vpage); 4781 ASSERT(sameprot(seg, a, maxpgsz)); 4782 if ((prot & protchk) == 0) { 4783 err = FC_PROT; 4784 goto error; 4785 } 4786 } 4787 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4788 pgsz < maxpgsz) { 4789 ASSERT(a > lpgaddr); 4790 szc = seg->s_szc; 4791 pgsz = maxpgsz; 4792 pages = btop(pgsz); 4793 ASSERT(IS_P2ALIGNED(aindx, pages)); 4794 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4795 pgsz); 4796 } 4797 if (type == F_SOFTLOCK && svd->vp != NULL) { 4798 mutex_enter(&freemem_lock); 4799 if (availrmem < tune.t_minarmem + pages) { 4800 mutex_exit(&freemem_lock); 4801 err = FC_MAKE_ERR(ENOMEM); 4802 goto error; 4803 } else { 4804 availrmem -= pages; 4805 segvn_pages_locked += pages; 4806 svd->softlockcnt += pages; 4807 } 4808 mutex_exit(&freemem_lock); 4809 } 4810 anon_array_enter(amp, aindx, &cookie); 4811 ppa_szc = (uint_t)-1; 4812 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4813 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4814 segvn_anypgsz, pgflags, svd->cred); 4815 if (ierr != 0) { 4816 anon_array_exit(&cookie); 4817 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4818 if (type == F_SOFTLOCK && svd->vp != NULL) { 4819 VM_STAT_ADD(segvnvmstats.fltanpages[5]); 4820 mutex_enter(&freemem_lock); 4821 availrmem += pages; 4822 segvn_pages_locked -= pages; 4823 svd->softlockcnt -= pages; 4824 mutex_exit(&freemem_lock); 4825 } 4826 if (ierr > 0) { 4827 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4828 err = FC_MAKE_ERR(ierr); 4829 goto error; 4830 } 4831 break; 4832 } 4833 4834 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4835 4836 ASSERT(segtype == MAP_SHARED || 4837 ppa[0]->p_szc <= szc); 4838 ASSERT(segtype == MAP_PRIVATE || 4839 ppa[0]->p_szc >= szc); 4840 4841 /* 4842 * Handle pages that have been marked for migration 4843 */ 4844 if (lgrp_optimizations()) 4845 page_migrate(seg, a, ppa, pages); 4846 4847 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4848 if (type == F_SOFTLOCK && svd->vp == NULL) { 4849 /* 4850 * If all pages in ppa array belong to the same 4851 * large page call segvn_slock_anonpages() 4852 * just for ppa[0]. 4853 */ 4854 for (i = 0; i < pages; i++) { 4855 if (!segvn_slock_anonpages(ppa[i], 4856 i == 0 && first)) { 4857 ulong_t j; 4858 for (j = 0; j < i; j++) { 4859 segvn_sunlock_anonpages( 4860 ppa[j], j == 0 && 4861 first); 4862 page_unlock(ppa[j]); 4863 } 4864 for (j = i; j < pages; j++) { 4865 page_unlock(ppa[j]); 4866 } 4867 anon_array_exit(&cookie); 4868 err = FC_MAKE_ERR(ENOMEM); 4869 goto error; 4870 } 4871 if (i == 0 && ppa[0]->p_szc >= szc) { 4872 ASSERT(!(page_pptonum(ppa[0]) & 4873 (pages - 1))); 4874 break; 4875 } 4876 } 4877 first = 0; 4878 mutex_enter(&freemem_lock); 4879 svd->softlockcnt += pages; 4880 segvn_pages_locked += pages; 4881 mutex_exit(&freemem_lock); 4882 } 4883 4884 if (segtype == MAP_SHARED) { 4885 vpprot |= PROT_WRITE; 4886 } 4887 4888 hat_memload_array(hat, a, pgsz, ppa, 4889 prot & vpprot, hat_flag); 4890 4891 if (hat_flag & HAT_LOAD_LOCK) { 4892 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4893 } else { 4894 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4895 for (i = 0; i < pages; i++) 4896 page_unlock(ppa[i]); 4897 } 4898 if (vpage != NULL) 4899 vpage += pages; 4900 4901 anon_array_exit(&cookie); 4902 adjszc_chk = 1; 4903 } 4904 if (a == lpgeaddr) 4905 break; 4906 ASSERT(a < lpgeaddr); 4907 /* 4908 * ierr == -1 means we failed to allocate a large page. 4909 * so do a size down operation. 4910 * 4911 * ierr == -2 means some other process that privately shares 4912 * pages with this process has allocated a larger page and we 4913 * need to retry with larger pages. So do a size up 4914 * operation. This relies on the fact that large pages are 4915 * never partially shared i.e. if we share any constituent 4916 * page of a large page with another process we must share the 4917 * entire large page. Note this cannot happen for SOFTLOCK 4918 * case, unless current address (a) is at the beginning of the 4919 * next page size boundary because the other process couldn't 4920 * have relocated locked pages. 4921 */ 4922 ASSERT(ierr == -1 || ierr == -2); 4923 /* 4924 * For the very first relocation failure try to purge this 4925 * segment's cache so that the relocator can obtain an 4926 * exclusive lock on pages we want to relocate. 4927 */ 4928 if (!purged && ierr == -1 && ppa_szc != (uint_t)-1 && 4929 svd->softlockcnt != 0) { 4930 purged = 1; 4931 segvn_purge(seg); 4932 continue; 4933 } 4934 4935 if (segvn_anypgsz) { 4936 ASSERT(ierr == -2 || szc != 0); 4937 ASSERT(ierr == -1 || szc < seg->s_szc); 4938 szc = (ierr == -1) ? szc - 1 : szc + 1; 4939 } else { 4940 /* 4941 * For non COW faults and segvn_anypgsz == 0 4942 * we need to be careful not to loop forever 4943 * if existing page is found with szc other 4944 * than 0 or seg->s_szc. This could be due 4945 * to page relocations on behalf of DR or 4946 * more likely large page creation. For this 4947 * case simply re-size to existing page's szc 4948 * if returned by anon_map_getpages(). 4949 */ 4950 if (ppa_szc == (uint_t)-1) { 4951 szc = (ierr == -1) ? 0 : seg->s_szc; 4952 } else { 4953 ASSERT(ppa_szc <= seg->s_szc); 4954 ASSERT(ierr == -2 || ppa_szc < szc); 4955 ASSERT(ierr == -1 || ppa_szc > szc); 4956 szc = ppa_szc; 4957 } 4958 } 4959 4960 pgsz = page_get_pagesize(szc); 4961 pages = btop(pgsz); 4962 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4963 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4964 if (type == F_SOFTLOCK) { 4965 /* 4966 * For softlocks we cannot reduce the fault area 4967 * (calculated based on the largest page size for this 4968 * segment) for size down and a is already next 4969 * page size aligned as assertted above for size 4970 * ups. Therefore just continue in case of softlock. 4971 */ 4972 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4973 continue; /* keep lint happy */ 4974 } else if (ierr == -2) { 4975 4976 /* 4977 * Size up case. Note lpgaddr may only be needed for 4978 * softlock case so we don't adjust it here. 4979 */ 4980 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4981 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4982 ASSERT(a >= lpgaddr); 4983 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4984 aindx = svd->anon_index + seg_page(seg, a); 4985 vpage = (svd->vpage != NULL) ? 4986 &svd->vpage[seg_page(seg, a)] : NULL; 4987 } else { 4988 /* 4989 * Size down case. Note lpgaddr may only be needed for 4990 * softlock case so we don't adjust it here. 4991 */ 4992 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4993 ASSERT(IS_P2ALIGNED(a, pgsz)); 4994 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4995 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4996 ASSERT(a < lpgeaddr); 4997 if (a < addr) { 4998 /* 4999 * The beginning of the large page region can 5000 * be pulled to the right to make a smaller 5001 * region. We haven't yet faulted a single 5002 * page. 5003 */ 5004 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 5005 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 5006 ASSERT(a >= lpgaddr); 5007 aindx = svd->anon_index + seg_page(seg, a); 5008 vpage = (svd->vpage != NULL) ? 5009 &svd->vpage[seg_page(seg, a)] : NULL; 5010 } 5011 } 5012 } 5013 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 5014 ANON_LOCK_EXIT(&->a_rwlock); 5015 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 5016 return (0); 5017 error: 5018 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 5019 ANON_LOCK_EXIT(&->a_rwlock); 5020 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 5021 if (type == F_SOFTLOCK && a > lpgaddr) { 5022 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 5023 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 5024 } 5025 return (err); 5026 } 5027 5028 int fltadvice = 1; /* set to free behind pages for sequential access */ 5029 5030 /* 5031 * This routine is called via a machine specific fault handling routine. 5032 * It is also called by software routines wishing to lock or unlock 5033 * a range of addresses. 5034 * 5035 * Here is the basic algorithm: 5036 * If unlocking 5037 * Call segvn_softunlock 5038 * Return 5039 * endif 5040 * Checking and set up work 5041 * If we will need some non-anonymous pages 5042 * Call VOP_GETPAGE over the range of non-anonymous pages 5043 * endif 5044 * Loop over all addresses requested 5045 * Call segvn_faultpage passing in page list 5046 * to load up translations and handle anonymous pages 5047 * endloop 5048 * Load up translation to any additional pages in page list not 5049 * already handled that fit into this segment 5050 */ 5051 static faultcode_t 5052 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 5053 enum fault_type type, enum seg_rw rw) 5054 { 5055 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5056 page_t **plp, **ppp, *pp; 5057 u_offset_t off; 5058 caddr_t a; 5059 struct vpage *vpage; 5060 uint_t vpprot, prot; 5061 int err; 5062 page_t *pl[PVN_GETPAGE_NUM + 1]; 5063 size_t plsz, pl_alloc_sz; 5064 size_t page; 5065 ulong_t anon_index; 5066 struct anon_map *amp; 5067 int dogetpage = 0; 5068 caddr_t lpgaddr, lpgeaddr; 5069 size_t pgsz; 5070 anon_sync_obj_t cookie; 5071 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 5072 5073 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5074 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 5075 5076 /* 5077 * First handle the easy stuff 5078 */ 5079 if (type == F_SOFTUNLOCK) { 5080 if (rw == S_READ_NOCOW) { 5081 rw = S_READ; 5082 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5083 } 5084 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5085 pgsz = (seg->s_szc == 0) ? PAGESIZE : 5086 page_get_pagesize(seg->s_szc); 5087 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 5088 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5089 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 5090 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5091 return (0); 5092 } 5093 5094 ASSERT(svd->tr_state == SEGVN_TR_OFF || 5095 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 5096 if (brkcow == 0) { 5097 if (svd->tr_state == SEGVN_TR_INIT) { 5098 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5099 if (svd->tr_state == SEGVN_TR_INIT) { 5100 ASSERT(svd->vp != NULL && svd->amp == NULL); 5101 ASSERT(svd->flags & MAP_TEXT); 5102 ASSERT(svd->type == MAP_PRIVATE); 5103 segvn_textrepl(seg); 5104 ASSERT(svd->tr_state != SEGVN_TR_INIT); 5105 ASSERT(svd->tr_state != SEGVN_TR_ON || 5106 svd->amp != NULL); 5107 } 5108 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5109 } 5110 } else if (svd->tr_state != SEGVN_TR_OFF) { 5111 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5112 5113 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 5114 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 5115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5116 return (FC_PROT); 5117 } 5118 5119 if (svd->tr_state == SEGVN_TR_ON) { 5120 ASSERT(svd->vp != NULL && svd->amp != NULL); 5121 segvn_textunrepl(seg, 0); 5122 ASSERT(svd->amp == NULL && 5123 svd->tr_state == SEGVN_TR_OFF); 5124 } else if (svd->tr_state != SEGVN_TR_OFF) { 5125 svd->tr_state = SEGVN_TR_OFF; 5126 } 5127 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5128 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5129 } 5130 5131 top: 5132 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5133 5134 /* 5135 * If we have the same protections for the entire segment, 5136 * insure that the access being attempted is legitimate. 5137 */ 5138 5139 if (svd->pageprot == 0) { 5140 uint_t protchk; 5141 5142 switch (rw) { 5143 case S_READ: 5144 case S_READ_NOCOW: 5145 protchk = PROT_READ; 5146 break; 5147 case S_WRITE: 5148 protchk = PROT_WRITE; 5149 break; 5150 case S_EXEC: 5151 protchk = PROT_EXEC; 5152 break; 5153 case S_OTHER: 5154 default: 5155 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 5156 break; 5157 } 5158 5159 if ((svd->prot & protchk) == 0) { 5160 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5161 return (FC_PROT); /* illegal access type */ 5162 } 5163 } 5164 5165 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5166 /* this must be SOFTLOCK S_READ fault */ 5167 ASSERT(svd->amp == NULL); 5168 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5169 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5170 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5171 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5172 /* 5173 * this must be the first ever non S_READ_NOCOW 5174 * softlock for this segment. 5175 */ 5176 ASSERT(svd->softlockcnt == 0); 5177 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5178 HAT_REGION_TEXT); 5179 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5180 } 5181 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5182 goto top; 5183 } 5184 5185 /* 5186 * We can't allow the long term use of softlocks for vmpss segments, 5187 * because in some file truncation cases we should be able to demote 5188 * the segment, which requires that there are no softlocks. The 5189 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5190 * segment is S_READ_NOCOW, where the caller holds the address space 5191 * locked as writer and calls softunlock before dropping the as lock. 5192 * S_READ_NOCOW is used by /proc to read memory from another user. 5193 * 5194 * Another deadlock between SOFTLOCK and file truncation can happen 5195 * because segvn_fault_vnodepages() calls the FS one pagesize at 5196 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5197 * can cause a deadlock because the first set of page_t's remain 5198 * locked SE_SHARED. To avoid this, we demote segments on a first 5199 * SOFTLOCK if they have a length greater than the segment's 5200 * page size. 5201 * 5202 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5203 * the access type is S_READ_NOCOW and the fault length is less than 5204 * or equal to the segment's page size. While this is quite restrictive, 5205 * it should be the most common case of SOFTLOCK against a vmpss 5206 * segment. 5207 * 5208 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5209 * caller makes sure no COW will be caused by another thread for a 5210 * softlocked page. 5211 */ 5212 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5213 int demote = 0; 5214 5215 if (rw != S_READ_NOCOW) { 5216 demote = 1; 5217 } 5218 if (!demote && len > PAGESIZE) { 5219 pgsz = page_get_pagesize(seg->s_szc); 5220 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5221 lpgeaddr); 5222 if (lpgeaddr - lpgaddr > pgsz) { 5223 demote = 1; 5224 } 5225 } 5226 5227 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5228 5229 if (demote) { 5230 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5231 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5232 if (seg->s_szc != 0) { 5233 segvn_vmpss_clrszc_cnt++; 5234 ASSERT(svd->softlockcnt == 0); 5235 err = segvn_clrszc(seg); 5236 if (err) { 5237 segvn_vmpss_clrszc_err++; 5238 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5239 return (FC_MAKE_ERR(err)); 5240 } 5241 } 5242 ASSERT(seg->s_szc == 0); 5243 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5244 goto top; 5245 } 5246 } 5247 5248 /* 5249 * Check to see if we need to allocate an anon_map structure. 5250 */ 5251 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5252 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5253 /* 5254 * Drop the "read" lock on the segment and acquire 5255 * the "write" version since we have to allocate the 5256 * anon_map. 5257 */ 5258 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5259 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5260 5261 if (svd->amp == NULL) { 5262 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5263 svd->amp->a_szc = seg->s_szc; 5264 } 5265 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5266 5267 /* 5268 * Start all over again since segment protections 5269 * may have changed after we dropped the "read" lock. 5270 */ 5271 goto top; 5272 } 5273 5274 /* 5275 * S_READ_NOCOW vs S_READ distinction was 5276 * only needed for the code above. After 5277 * that we treat it as S_READ. 5278 */ 5279 if (rw == S_READ_NOCOW) { 5280 ASSERT(type == F_SOFTLOCK); 5281 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5282 rw = S_READ; 5283 } 5284 5285 amp = svd->amp; 5286 5287 /* 5288 * MADV_SEQUENTIAL work is ignored for large page segments. 5289 */ 5290 if (seg->s_szc != 0) { 5291 pgsz = page_get_pagesize(seg->s_szc); 5292 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5293 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5294 if (svd->vp == NULL) { 5295 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5296 lpgeaddr, type, rw, addr, addr + len, brkcow); 5297 } else { 5298 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5299 lpgeaddr, type, rw, addr, addr + len, brkcow); 5300 if (err == IE_RETRY) { 5301 ASSERT(seg->s_szc == 0); 5302 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5303 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5304 goto top; 5305 } 5306 } 5307 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5308 return (err); 5309 } 5310 5311 page = seg_page(seg, addr); 5312 if (amp != NULL) { 5313 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5314 anon_index = svd->anon_index + page; 5315 5316 if (type == F_PROT && rw == S_READ && 5317 svd->tr_state == SEGVN_TR_OFF && 5318 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5319 size_t index = anon_index; 5320 struct anon *ap; 5321 5322 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5323 /* 5324 * The fast path could apply to S_WRITE also, except 5325 * that the protection fault could be caused by lazy 5326 * tlb flush when ro->rw. In this case, the pte is 5327 * RW already. But RO in the other cpu's tlb causes 5328 * the fault. Since hat_chgprot won't do anything if 5329 * pte doesn't change, we may end up faulting 5330 * indefinitely until the RO tlb entry gets replaced. 5331 */ 5332 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5333 anon_array_enter(amp, index, &cookie); 5334 ap = anon_get_ptr(amp->ahp, index); 5335 anon_array_exit(&cookie); 5336 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5337 ANON_LOCK_EXIT(&->a_rwlock); 5338 goto slow; 5339 } 5340 } 5341 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5342 ANON_LOCK_EXIT(&->a_rwlock); 5343 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5344 return (0); 5345 } 5346 } 5347 slow: 5348 5349 if (svd->vpage == NULL) 5350 vpage = NULL; 5351 else 5352 vpage = &svd->vpage[page]; 5353 5354 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5355 5356 /* 5357 * If MADV_SEQUENTIAL has been set for the particular page we 5358 * are faulting on, free behind all pages in the segment and put 5359 * them on the free list. 5360 */ 5361 5362 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5363 struct vpage *vpp; 5364 ulong_t fanon_index; 5365 size_t fpage; 5366 u_offset_t pgoff, fpgoff; 5367 struct vnode *fvp; 5368 struct anon *fap = NULL; 5369 5370 if (svd->advice == MADV_SEQUENTIAL || 5371 (svd->pageadvice && 5372 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5373 pgoff = off - PAGESIZE; 5374 fpage = page - 1; 5375 if (vpage != NULL) 5376 vpp = &svd->vpage[fpage]; 5377 if (amp != NULL) 5378 fanon_index = svd->anon_index + fpage; 5379 5380 while (pgoff > svd->offset) { 5381 if (svd->advice != MADV_SEQUENTIAL && 5382 (!svd->pageadvice || (vpage && 5383 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5384 break; 5385 5386 /* 5387 * If this is an anon page, we must find the 5388 * correct <vp, offset> for it 5389 */ 5390 fap = NULL; 5391 if (amp != NULL) { 5392 ANON_LOCK_ENTER(&->a_rwlock, 5393 RW_READER); 5394 anon_array_enter(amp, fanon_index, 5395 &cookie); 5396 fap = anon_get_ptr(amp->ahp, 5397 fanon_index); 5398 if (fap != NULL) { 5399 swap_xlate(fap, &fvp, &fpgoff); 5400 } else { 5401 fpgoff = pgoff; 5402 fvp = svd->vp; 5403 } 5404 anon_array_exit(&cookie); 5405 ANON_LOCK_EXIT(&->a_rwlock); 5406 } else { 5407 fpgoff = pgoff; 5408 fvp = svd->vp; 5409 } 5410 if (fvp == NULL) 5411 break; /* XXX */ 5412 /* 5413 * Skip pages that are free or have an 5414 * "exclusive" lock. 5415 */ 5416 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5417 if (pp == NULL) 5418 break; 5419 /* 5420 * We don't need the page_struct_lock to test 5421 * as this is only advisory; even if we 5422 * acquire it someone might race in and lock 5423 * the page after we unlock and before the 5424 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5425 */ 5426 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5427 /* 5428 * Hold the vnode before releasing 5429 * the page lock to prevent it from 5430 * being freed and re-used by some 5431 * other thread. 5432 */ 5433 VN_HOLD(fvp); 5434 page_unlock(pp); 5435 /* 5436 * We should build a page list 5437 * to kluster putpages XXX 5438 */ 5439 (void) VOP_PUTPAGE(fvp, 5440 (offset_t)fpgoff, PAGESIZE, 5441 (B_DONTNEED|B_FREE|B_ASYNC), 5442 svd->cred, NULL); 5443 VN_RELE(fvp); 5444 } else { 5445 /* 5446 * XXX - Should the loop terminate if 5447 * the page is `locked'? 5448 */ 5449 page_unlock(pp); 5450 } 5451 --vpp; 5452 --fanon_index; 5453 pgoff -= PAGESIZE; 5454 } 5455 } 5456 } 5457 5458 plp = pl; 5459 *plp = NULL; 5460 pl_alloc_sz = 0; 5461 5462 /* 5463 * See if we need to call VOP_GETPAGE for 5464 * *any* of the range being faulted on. 5465 * We can skip all of this work if there 5466 * was no original vnode. 5467 */ 5468 if (svd->vp != NULL) { 5469 u_offset_t vp_off; 5470 size_t vp_len; 5471 struct anon *ap; 5472 vnode_t *vp; 5473 5474 vp_off = off; 5475 vp_len = len; 5476 5477 if (amp == NULL) 5478 dogetpage = 1; 5479 else { 5480 /* 5481 * Only acquire reader lock to prevent amp->ahp 5482 * from being changed. It's ok to miss pages, 5483 * hence we don't do anon_array_enter 5484 */ 5485 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5486 ap = anon_get_ptr(amp->ahp, anon_index); 5487 5488 if (len <= PAGESIZE) 5489 /* inline non_anon() */ 5490 dogetpage = (ap == NULL); 5491 else 5492 dogetpage = non_anon(amp->ahp, anon_index, 5493 &vp_off, &vp_len); 5494 ANON_LOCK_EXIT(&->a_rwlock); 5495 } 5496 5497 if (dogetpage) { 5498 enum seg_rw arw; 5499 struct as *as = seg->s_as; 5500 5501 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5502 /* 5503 * Page list won't fit in local array, 5504 * allocate one of the needed size. 5505 */ 5506 pl_alloc_sz = 5507 (btop(len) + 1) * sizeof (page_t *); 5508 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5509 plp[0] = NULL; 5510 plsz = len; 5511 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5512 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5513 (((size_t)(addr + PAGESIZE) < 5514 (size_t)(seg->s_base + seg->s_size)) && 5515 hat_probe(as->a_hat, addr + PAGESIZE))) { 5516 /* 5517 * Ask VOP_GETPAGE to return the exact number 5518 * of pages if 5519 * (a) this is a COW fault, or 5520 * (b) this is a software fault, or 5521 * (c) next page is already mapped. 5522 */ 5523 plsz = len; 5524 } else { 5525 /* 5526 * Ask VOP_GETPAGE to return adjacent pages 5527 * within the segment. 5528 */ 5529 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5530 ((seg->s_base + seg->s_size) - addr)); 5531 ASSERT((addr + plsz) <= 5532 (seg->s_base + seg->s_size)); 5533 } 5534 5535 /* 5536 * Need to get some non-anonymous pages. 5537 * We need to make only one call to GETPAGE to do 5538 * this to prevent certain deadlocking conditions 5539 * when we are doing locking. In this case 5540 * non_anon() should have picked up the smallest 5541 * range which includes all the non-anonymous 5542 * pages in the requested range. We have to 5543 * be careful regarding which rw flag to pass in 5544 * because on a private mapping, the underlying 5545 * object is never allowed to be written. 5546 */ 5547 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5548 arw = S_READ; 5549 } else { 5550 arw = rw; 5551 } 5552 vp = svd->vp; 5553 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5554 "segvn_getpage:seg %p addr %p vp %p", 5555 seg, addr, vp); 5556 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5557 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5558 svd->cred, NULL); 5559 if (err) { 5560 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5561 segvn_pagelist_rele(plp); 5562 if (pl_alloc_sz) 5563 kmem_free(plp, pl_alloc_sz); 5564 return (FC_MAKE_ERR(err)); 5565 } 5566 if (svd->type == MAP_PRIVATE) 5567 vpprot &= ~PROT_WRITE; 5568 } 5569 } 5570 5571 /* 5572 * N.B. at this time the plp array has all the needed non-anon 5573 * pages in addition to (possibly) having some adjacent pages. 5574 */ 5575 5576 /* 5577 * Always acquire the anon_array_lock to prevent 5578 * 2 threads from allocating separate anon slots for 5579 * the same "addr". 5580 * 5581 * If this is a copy-on-write fault and we don't already 5582 * have the anon_array_lock, acquire it to prevent the 5583 * fault routine from handling multiple copy-on-write faults 5584 * on the same "addr" in the same address space. 5585 * 5586 * Only one thread should deal with the fault since after 5587 * it is handled, the other threads can acquire a translation 5588 * to the newly created private page. This prevents two or 5589 * more threads from creating different private pages for the 5590 * same fault. 5591 * 5592 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5593 * to prevent deadlock between this thread and another thread 5594 * which has soft-locked this page and wants to acquire serial_lock. 5595 * ( bug 4026339 ) 5596 * 5597 * The fix for bug 4026339 becomes unnecessary when using the 5598 * locking scheme with per amp rwlock and a global set of hash 5599 * lock, anon_array_lock. If we steal a vnode page when low 5600 * on memory and upgrad the page lock through page_rename, 5601 * then the page is PAGE_HANDLED, nothing needs to be done 5602 * for this page after returning from segvn_faultpage. 5603 * 5604 * But really, the page lock should be downgraded after 5605 * the stolen page is page_rename'd. 5606 */ 5607 5608 if (amp != NULL) 5609 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5610 5611 /* 5612 * Ok, now loop over the address range and handle faults 5613 */ 5614 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5615 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5616 type, rw, brkcow, a == addr); 5617 if (err) { 5618 if (amp != NULL) 5619 ANON_LOCK_EXIT(&->a_rwlock); 5620 if (type == F_SOFTLOCK && a > addr) { 5621 segvn_softunlock(seg, addr, (a - addr), 5622 S_OTHER); 5623 } 5624 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5625 segvn_pagelist_rele(plp); 5626 if (pl_alloc_sz) 5627 kmem_free(plp, pl_alloc_sz); 5628 return (err); 5629 } 5630 if (vpage) { 5631 vpage++; 5632 } else if (svd->vpage) { 5633 page = seg_page(seg, addr); 5634 vpage = &svd->vpage[++page]; 5635 } 5636 } 5637 5638 /* Didn't get pages from the underlying fs so we're done */ 5639 if (!dogetpage) 5640 goto done; 5641 5642 /* 5643 * Now handle any other pages in the list returned. 5644 * If the page can be used, load up the translations now. 5645 * Note that the for loop will only be entered if "plp" 5646 * is pointing to a non-NULL page pointer which means that 5647 * VOP_GETPAGE() was called and vpprot has been initialized. 5648 */ 5649 if (svd->pageprot == 0) 5650 prot = svd->prot & vpprot; 5651 5652 5653 /* 5654 * Large Files: diff should be unsigned value because we started 5655 * supporting > 2GB segment sizes from 2.5.1 and when a 5656 * large file of size > 2GB gets mapped to address space 5657 * the diff value can be > 2GB. 5658 */ 5659 5660 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5661 size_t diff; 5662 struct anon *ap; 5663 int anon_index; 5664 anon_sync_obj_t cookie; 5665 int hat_flag = HAT_LOAD_ADV; 5666 5667 if (svd->flags & MAP_TEXT) { 5668 hat_flag |= HAT_LOAD_TEXT; 5669 } 5670 5671 if (pp == PAGE_HANDLED) 5672 continue; 5673 5674 if (svd->tr_state != SEGVN_TR_ON && 5675 pp->p_offset >= svd->offset && 5676 pp->p_offset < svd->offset + seg->s_size) { 5677 5678 diff = pp->p_offset - svd->offset; 5679 5680 /* 5681 * Large Files: Following is the assertion 5682 * validating the above cast. 5683 */ 5684 ASSERT(svd->vp == pp->p_vnode); 5685 5686 page = btop(diff); 5687 if (svd->pageprot) 5688 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5689 5690 /* 5691 * Prevent other threads in the address space from 5692 * creating private pages (i.e., allocating anon slots) 5693 * while we are in the process of loading translations 5694 * to additional pages returned by the underlying 5695 * object. 5696 */ 5697 if (amp != NULL) { 5698 anon_index = svd->anon_index + page; 5699 anon_array_enter(amp, anon_index, &cookie); 5700 ap = anon_get_ptr(amp->ahp, anon_index); 5701 } 5702 if ((amp == NULL) || (ap == NULL)) { 5703 if (IS_VMODSORT(pp->p_vnode) || 5704 enable_mbit_wa) { 5705 if (rw == S_WRITE) 5706 hat_setmod(pp); 5707 else if (rw != S_OTHER && 5708 !hat_ismod(pp)) 5709 prot &= ~PROT_WRITE; 5710 } 5711 /* 5712 * Skip mapping read ahead pages marked 5713 * for migration, so they will get migrated 5714 * properly on fault 5715 */ 5716 ASSERT(amp == NULL || 5717 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5718 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5719 hat_memload_region(hat, 5720 seg->s_base + diff, 5721 pp, prot, hat_flag, 5722 svd->rcookie); 5723 } 5724 } 5725 if (amp != NULL) 5726 anon_array_exit(&cookie); 5727 } 5728 page_unlock(pp); 5729 } 5730 done: 5731 if (amp != NULL) 5732 ANON_LOCK_EXIT(&->a_rwlock); 5733 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5734 if (pl_alloc_sz) 5735 kmem_free(plp, pl_alloc_sz); 5736 return (0); 5737 } 5738 5739 /* 5740 * This routine is used to start I/O on pages asynchronously. XXX it will 5741 * only create PAGESIZE pages. At fault time they will be relocated into 5742 * larger pages. 5743 */ 5744 static faultcode_t 5745 segvn_faulta(struct seg *seg, caddr_t addr) 5746 { 5747 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5748 int err; 5749 struct anon_map *amp; 5750 vnode_t *vp; 5751 5752 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5753 5754 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5755 if ((amp = svd->amp) != NULL) { 5756 struct anon *ap; 5757 5758 /* 5759 * Reader lock to prevent amp->ahp from being changed. 5760 * This is advisory, it's ok to miss a page, so 5761 * we don't do anon_array_enter lock. 5762 */ 5763 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5764 if ((ap = anon_get_ptr(amp->ahp, 5765 svd->anon_index + seg_page(seg, addr))) != NULL) { 5766 5767 err = anon_getpage(&ap, NULL, NULL, 5768 0, seg, addr, S_READ, svd->cred); 5769 5770 ANON_LOCK_EXIT(&->a_rwlock); 5771 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5772 if (err) 5773 return (FC_MAKE_ERR(err)); 5774 return (0); 5775 } 5776 ANON_LOCK_EXIT(&->a_rwlock); 5777 } 5778 5779 if (svd->vp == NULL) { 5780 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5781 return (0); /* zfod page - do nothing now */ 5782 } 5783 5784 vp = svd->vp; 5785 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5786 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5787 err = VOP_GETPAGE(vp, 5788 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5789 PAGESIZE, NULL, NULL, 0, seg, addr, 5790 S_OTHER, svd->cred, NULL); 5791 5792 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5793 if (err) 5794 return (FC_MAKE_ERR(err)); 5795 return (0); 5796 } 5797 5798 static int 5799 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5800 { 5801 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5802 struct vpage *cvp, *svp, *evp; 5803 struct vnode *vp; 5804 size_t pgsz; 5805 pgcnt_t pgcnt; 5806 anon_sync_obj_t cookie; 5807 int unload_done = 0; 5808 5809 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5810 5811 if ((svd->maxprot & prot) != prot) 5812 return (EACCES); /* violated maxprot */ 5813 5814 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5815 5816 /* return if prot is the same */ 5817 if (!svd->pageprot && svd->prot == prot) { 5818 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5819 return (0); 5820 } 5821 5822 /* 5823 * Since we change protections we first have to flush the cache. 5824 * This makes sure all the pagelock calls have to recheck 5825 * protections. 5826 */ 5827 if (svd->softlockcnt > 0) { 5828 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5829 /* 5830 * Since we do have the segvn writers lock nobody can fill 5831 * the cache with entries belonging to this seg during 5832 * the purge. The flush either succeeds or we still have 5833 * pending I/Os. 5834 */ 5835 segvn_purge(seg); 5836 if (svd->softlockcnt > 0) { 5837 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5838 return (EAGAIN); 5839 } 5840 } 5841 5842 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5843 ASSERT(svd->amp == NULL); 5844 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5845 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5846 HAT_REGION_TEXT); 5847 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5848 unload_done = 1; 5849 } else if (svd->tr_state == SEGVN_TR_INIT) { 5850 svd->tr_state = SEGVN_TR_OFF; 5851 } else if (svd->tr_state == SEGVN_TR_ON) { 5852 ASSERT(svd->amp != NULL); 5853 segvn_textunrepl(seg, 0); 5854 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5855 unload_done = 1; 5856 } 5857 5858 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5859 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5860 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5861 segvn_inval_trcache(svd->vp); 5862 } 5863 if (seg->s_szc != 0) { 5864 int err; 5865 pgsz = page_get_pagesize(seg->s_szc); 5866 pgcnt = pgsz >> PAGESHIFT; 5867 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5868 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5869 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5870 ASSERT(seg->s_base != addr || seg->s_size != len); 5871 /* 5872 * If we are holding the as lock as a reader then 5873 * we need to return IE_RETRY and let the as 5874 * layer drop and re-acquire the lock as a writer. 5875 */ 5876 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5877 return (IE_RETRY); 5878 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5879 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5880 err = segvn_demote_range(seg, addr, len, 5881 SDR_END, 0); 5882 } else { 5883 uint_t szcvec = map_pgszcvec(seg->s_base, 5884 pgsz, (uintptr_t)seg->s_base, 5885 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5886 err = segvn_demote_range(seg, addr, len, 5887 SDR_END, szcvec); 5888 } 5889 if (err == 0) 5890 return (IE_RETRY); 5891 if (err == ENOMEM) 5892 return (IE_NOMEM); 5893 return (err); 5894 } 5895 } 5896 5897 5898 /* 5899 * If it's a private mapping and we're making it writable then we 5900 * may have to reserve the additional swap space now. If we are 5901 * making writable only a part of the segment then we use its vpage 5902 * array to keep a record of the pages for which we have reserved 5903 * swap. In this case we set the pageswap field in the segment's 5904 * segvn structure to record this. 5905 * 5906 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5907 * removing write permission on the entire segment and we haven't 5908 * modified any pages, we can release the swap space. 5909 */ 5910 if (svd->type == MAP_PRIVATE) { 5911 if (prot & PROT_WRITE) { 5912 if (!(svd->flags & MAP_NORESERVE) && 5913 !(svd->swresv && svd->pageswap == 0)) { 5914 size_t sz = 0; 5915 5916 /* 5917 * Start by determining how much swap 5918 * space is required. 5919 */ 5920 if (addr == seg->s_base && 5921 len == seg->s_size && 5922 svd->pageswap == 0) { 5923 /* The whole segment */ 5924 sz = seg->s_size; 5925 } else { 5926 /* 5927 * Make sure that the vpage array 5928 * exists, and make a note of the 5929 * range of elements corresponding 5930 * to len. 5931 */ 5932 segvn_vpage(seg); 5933 svp = &svd->vpage[seg_page(seg, addr)]; 5934 evp = &svd->vpage[seg_page(seg, 5935 addr + len)]; 5936 5937 if (svd->pageswap == 0) { 5938 /* 5939 * This is the first time we've 5940 * asked for a part of this 5941 * segment, so we need to 5942 * reserve everything we've 5943 * been asked for. 5944 */ 5945 sz = len; 5946 } else { 5947 /* 5948 * We have to count the number 5949 * of pages required. 5950 */ 5951 for (cvp = svp; cvp < evp; 5952 cvp++) { 5953 if (!VPP_ISSWAPRES(cvp)) 5954 sz++; 5955 } 5956 sz <<= PAGESHIFT; 5957 } 5958 } 5959 5960 /* Try to reserve the necessary swap. */ 5961 if (anon_resv_zone(sz, 5962 seg->s_as->a_proc->p_zone) == 0) { 5963 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5964 return (IE_NOMEM); 5965 } 5966 5967 /* 5968 * Make a note of how much swap space 5969 * we've reserved. 5970 */ 5971 if (svd->pageswap == 0 && sz == seg->s_size) { 5972 svd->swresv = sz; 5973 } else { 5974 ASSERT(svd->vpage != NULL); 5975 svd->swresv += sz; 5976 svd->pageswap = 1; 5977 for (cvp = svp; cvp < evp; cvp++) { 5978 if (!VPP_ISSWAPRES(cvp)) 5979 VPP_SETSWAPRES(cvp); 5980 } 5981 } 5982 } 5983 } else { 5984 /* 5985 * Swap space is released only if this segment 5986 * does not map anonymous memory, since read faults 5987 * on such segments still need an anon slot to read 5988 * in the data. 5989 */ 5990 if (svd->swresv != 0 && svd->vp != NULL && 5991 svd->amp == NULL && addr == seg->s_base && 5992 len == seg->s_size && svd->pageprot == 0) { 5993 ASSERT(svd->pageswap == 0); 5994 anon_unresv_zone(svd->swresv, 5995 seg->s_as->a_proc->p_zone); 5996 svd->swresv = 0; 5997 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5998 "anon proc:%p %lu %u", seg, 0, 0); 5999 } 6000 } 6001 } 6002 6003 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 6004 if (svd->prot == prot) { 6005 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6006 return (0); /* all done */ 6007 } 6008 svd->prot = (uchar_t)prot; 6009 } else if (svd->type == MAP_PRIVATE) { 6010 struct anon *ap = NULL; 6011 page_t *pp; 6012 u_offset_t offset, off; 6013 struct anon_map *amp; 6014 ulong_t anon_idx = 0; 6015 6016 /* 6017 * A vpage structure exists or else the change does not 6018 * involve the entire segment. Establish a vpage structure 6019 * if none is there. Then, for each page in the range, 6020 * adjust its individual permissions. Note that write- 6021 * enabling a MAP_PRIVATE page can affect the claims for 6022 * locked down memory. Overcommitting memory terminates 6023 * the operation. 6024 */ 6025 segvn_vpage(seg); 6026 svd->pageprot = 1; 6027 if ((amp = svd->amp) != NULL) { 6028 anon_idx = svd->anon_index + seg_page(seg, addr); 6029 ASSERT(seg->s_szc == 0 || 6030 IS_P2ALIGNED(anon_idx, pgcnt)); 6031 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6032 } 6033 6034 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 6035 evp = &svd->vpage[seg_page(seg, addr + len)]; 6036 6037 /* 6038 * See Statement at the beginning of segvn_lockop regarding 6039 * the way cowcnts and lckcnts are handled. 6040 */ 6041 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 6042 6043 if (seg->s_szc != 0) { 6044 if (amp != NULL) { 6045 anon_array_enter(amp, anon_idx, 6046 &cookie); 6047 } 6048 if (IS_P2ALIGNED(anon_idx, pgcnt) && 6049 !segvn_claim_pages(seg, svp, offset, 6050 anon_idx, prot)) { 6051 if (amp != NULL) { 6052 anon_array_exit(&cookie); 6053 } 6054 break; 6055 } 6056 if (amp != NULL) { 6057 anon_array_exit(&cookie); 6058 } 6059 anon_idx++; 6060 } else { 6061 if (amp != NULL) { 6062 anon_array_enter(amp, anon_idx, 6063 &cookie); 6064 ap = anon_get_ptr(amp->ahp, anon_idx++); 6065 } 6066 6067 if (VPP_ISPPLOCK(svp) && 6068 VPP_PROT(svp) != prot) { 6069 6070 if (amp == NULL || ap == NULL) { 6071 vp = svd->vp; 6072 off = offset; 6073 } else 6074 swap_xlate(ap, &vp, &off); 6075 if (amp != NULL) 6076 anon_array_exit(&cookie); 6077 6078 if ((pp = page_lookup(vp, off, 6079 SE_SHARED)) == NULL) { 6080 panic("segvn_setprot: no page"); 6081 /*NOTREACHED*/ 6082 } 6083 ASSERT(seg->s_szc == 0); 6084 if ((VPP_PROT(svp) ^ prot) & 6085 PROT_WRITE) { 6086 if (prot & PROT_WRITE) { 6087 if (!page_addclaim(pp)) { 6088 page_unlock(pp); 6089 break; 6090 } 6091 } else { 6092 if (!page_subclaim(pp)) { 6093 page_unlock(pp); 6094 break; 6095 } 6096 } 6097 } 6098 page_unlock(pp); 6099 } else if (amp != NULL) 6100 anon_array_exit(&cookie); 6101 } 6102 VPP_SETPROT(svp, prot); 6103 offset += PAGESIZE; 6104 } 6105 if (amp != NULL) 6106 ANON_LOCK_EXIT(&->a_rwlock); 6107 6108 /* 6109 * Did we terminate prematurely? If so, simply unload 6110 * the translations to the things we've updated so far. 6111 */ 6112 if (svp != evp) { 6113 if (unload_done) { 6114 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6115 return (IE_NOMEM); 6116 } 6117 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 6118 PAGESIZE; 6119 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 6120 if (len != 0) 6121 hat_unload(seg->s_as->a_hat, addr, 6122 len, HAT_UNLOAD); 6123 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6124 return (IE_NOMEM); 6125 } 6126 } else { 6127 segvn_vpage(seg); 6128 svd->pageprot = 1; 6129 evp = &svd->vpage[seg_page(seg, addr + len)]; 6130 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 6131 VPP_SETPROT(svp, prot); 6132 } 6133 } 6134 6135 if (unload_done) { 6136 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6137 return (0); 6138 } 6139 6140 if (((prot & PROT_WRITE) != 0 && 6141 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 6142 (prot & ~PROT_USER) == PROT_NONE) { 6143 /* 6144 * Either private or shared data with write access (in 6145 * which case we need to throw out all former translations 6146 * so that we get the right translations set up on fault 6147 * and we don't allow write access to any copy-on-write pages 6148 * that might be around or to prevent write access to pages 6149 * representing holes in a file), or we don't have permission 6150 * to access the memory at all (in which case we have to 6151 * unload any current translations that might exist). 6152 */ 6153 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 6154 } else { 6155 /* 6156 * A shared mapping or a private mapping in which write 6157 * protection is going to be denied - just change all the 6158 * protections over the range of addresses in question. 6159 * segvn does not support any other attributes other 6160 * than prot so we can use hat_chgattr. 6161 */ 6162 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6163 } 6164 6165 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6166 6167 return (0); 6168 } 6169 6170 /* 6171 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 6172 * to determine if the seg is capable of mapping the requested szc. 6173 */ 6174 static int 6175 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6176 { 6177 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6178 struct segvn_data *nsvd; 6179 struct anon_map *amp = svd->amp; 6180 struct seg *nseg; 6181 caddr_t eaddr = addr + len, a; 6182 size_t pgsz = page_get_pagesize(szc); 6183 pgcnt_t pgcnt = page_get_pagecnt(szc); 6184 int err; 6185 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6186 extern struct vnode kvp; 6187 6188 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6189 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6190 6191 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6192 return (0); 6193 } 6194 6195 /* 6196 * addr should always be pgsz aligned but eaddr may be misaligned if 6197 * it's at the end of the segment. 6198 * 6199 * XXX we should assert this condition since as_setpagesize() logic 6200 * guarantees it. 6201 */ 6202 if (!IS_P2ALIGNED(addr, pgsz) || 6203 (!IS_P2ALIGNED(eaddr, pgsz) && 6204 eaddr != seg->s_base + seg->s_size)) { 6205 6206 segvn_setpgsz_align_err++; 6207 return (EINVAL); 6208 } 6209 6210 if (amp != NULL && svd->type == MAP_SHARED) { 6211 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6212 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6213 6214 segvn_setpgsz_anon_align_err++; 6215 return (EINVAL); 6216 } 6217 } 6218 6219 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6220 szc > segvn_maxpgszc) { 6221 return (EINVAL); 6222 } 6223 6224 /* paranoid check */ 6225 if (svd->vp != NULL && 6226 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6227 return (EINVAL); 6228 } 6229 6230 if (seg->s_szc == 0 && svd->vp != NULL && 6231 map_addr_vacalign_check(addr, off)) { 6232 return (EINVAL); 6233 } 6234 6235 /* 6236 * Check that protections are the same within new page 6237 * size boundaries. 6238 */ 6239 if (svd->pageprot) { 6240 for (a = addr; a < eaddr; a += pgsz) { 6241 if ((a + pgsz) > eaddr) { 6242 if (!sameprot(seg, a, eaddr - a)) { 6243 return (EINVAL); 6244 } 6245 } else { 6246 if (!sameprot(seg, a, pgsz)) { 6247 return (EINVAL); 6248 } 6249 } 6250 } 6251 } 6252 6253 /* 6254 * Since we are changing page size we first have to flush 6255 * the cache. This makes sure all the pagelock calls have 6256 * to recheck protections. 6257 */ 6258 if (svd->softlockcnt > 0) { 6259 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6260 /* 6261 * Since we do have the segvn writers lock nobody can fill 6262 * the cache with entries belonging to this seg during 6263 * the purge. The flush either succeeds or we still have 6264 * pending I/Os. 6265 */ 6266 segvn_purge(seg); 6267 if (svd->softlockcnt > 0) { 6268 return (EAGAIN); 6269 } 6270 } 6271 6272 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6273 ASSERT(svd->amp == NULL); 6274 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6275 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6276 HAT_REGION_TEXT); 6277 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6278 } else if (svd->tr_state == SEGVN_TR_INIT) { 6279 svd->tr_state = SEGVN_TR_OFF; 6280 } else if (svd->tr_state == SEGVN_TR_ON) { 6281 ASSERT(svd->amp != NULL); 6282 segvn_textunrepl(seg, 1); 6283 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6284 amp = NULL; 6285 } 6286 6287 /* 6288 * Operation for sub range of existing segment. 6289 */ 6290 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6291 if (szc < seg->s_szc) { 6292 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6293 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6294 if (err == 0) { 6295 return (IE_RETRY); 6296 } 6297 if (err == ENOMEM) { 6298 return (IE_NOMEM); 6299 } 6300 return (err); 6301 } 6302 if (addr != seg->s_base) { 6303 nseg = segvn_split_seg(seg, addr); 6304 if (eaddr != (nseg->s_base + nseg->s_size)) { 6305 /* eaddr is szc aligned */ 6306 (void) segvn_split_seg(nseg, eaddr); 6307 } 6308 return (IE_RETRY); 6309 } 6310 if (eaddr != (seg->s_base + seg->s_size)) { 6311 /* eaddr is szc aligned */ 6312 (void) segvn_split_seg(seg, eaddr); 6313 } 6314 return (IE_RETRY); 6315 } 6316 6317 /* 6318 * Break any low level sharing and reset seg->s_szc to 0. 6319 */ 6320 if ((err = segvn_clrszc(seg)) != 0) { 6321 if (err == ENOMEM) { 6322 err = IE_NOMEM; 6323 } 6324 return (err); 6325 } 6326 ASSERT(seg->s_szc == 0); 6327 6328 /* 6329 * If the end of the current segment is not pgsz aligned 6330 * then attempt to concatenate with the next segment. 6331 */ 6332 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6333 nseg = AS_SEGNEXT(seg->s_as, seg); 6334 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6335 return (ENOMEM); 6336 } 6337 if (nseg->s_ops != &segvn_ops) { 6338 return (EINVAL); 6339 } 6340 nsvd = (struct segvn_data *)nseg->s_data; 6341 if (nsvd->softlockcnt > 0) { 6342 segvn_purge(nseg); 6343 if (nsvd->softlockcnt > 0) { 6344 return (EAGAIN); 6345 } 6346 } 6347 err = segvn_clrszc(nseg); 6348 if (err == ENOMEM) { 6349 err = IE_NOMEM; 6350 } 6351 if (err != 0) { 6352 return (err); 6353 } 6354 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6355 err = segvn_concat(seg, nseg, 1); 6356 if (err == -1) { 6357 return (EINVAL); 6358 } 6359 if (err == -2) { 6360 return (IE_NOMEM); 6361 } 6362 return (IE_RETRY); 6363 } 6364 6365 /* 6366 * May need to re-align anon array to 6367 * new szc. 6368 */ 6369 if (amp != NULL) { 6370 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6371 struct anon_hdr *nahp; 6372 6373 ASSERT(svd->type == MAP_PRIVATE); 6374 6375 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6376 ASSERT(amp->refcnt == 1); 6377 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6378 if (nahp == NULL) { 6379 ANON_LOCK_EXIT(&->a_rwlock); 6380 return (IE_NOMEM); 6381 } 6382 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6383 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6384 anon_release(nahp, btop(amp->size)); 6385 ANON_LOCK_EXIT(&->a_rwlock); 6386 return (IE_NOMEM); 6387 } 6388 anon_release(amp->ahp, btop(amp->size)); 6389 amp->ahp = nahp; 6390 svd->anon_index = 0; 6391 ANON_LOCK_EXIT(&->a_rwlock); 6392 } 6393 } 6394 if (svd->vp != NULL && szc != 0) { 6395 struct vattr va; 6396 u_offset_t eoffpage = svd->offset; 6397 va.va_mask = AT_SIZE; 6398 eoffpage += seg->s_size; 6399 eoffpage = btopr(eoffpage); 6400 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6401 segvn_setpgsz_getattr_err++; 6402 return (EINVAL); 6403 } 6404 if (btopr(va.va_size) < eoffpage) { 6405 segvn_setpgsz_eof_err++; 6406 return (EINVAL); 6407 } 6408 if (amp != NULL) { 6409 /* 6410 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6411 * don't take anon map lock here to avoid holding it 6412 * across VOP_GETPAGE() calls that may call back into 6413 * segvn for klsutering checks. We don't really need 6414 * anon map lock here since it's a private segment and 6415 * we hold as level lock as writers. 6416 */ 6417 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6418 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6419 seg->s_size, szc, svd->prot, svd->vpage, 6420 svd->cred)) != 0) { 6421 return (EINVAL); 6422 } 6423 } 6424 segvn_setvnode_mpss(svd->vp); 6425 } 6426 6427 if (amp != NULL) { 6428 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6429 if (svd->type == MAP_PRIVATE) { 6430 amp->a_szc = szc; 6431 } else if (szc > amp->a_szc) { 6432 amp->a_szc = szc; 6433 } 6434 ANON_LOCK_EXIT(&->a_rwlock); 6435 } 6436 6437 seg->s_szc = szc; 6438 6439 return (0); 6440 } 6441 6442 static int 6443 segvn_clrszc(struct seg *seg) 6444 { 6445 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6446 struct anon_map *amp = svd->amp; 6447 size_t pgsz; 6448 pgcnt_t pages; 6449 int err = 0; 6450 caddr_t a = seg->s_base; 6451 caddr_t ea = a + seg->s_size; 6452 ulong_t an_idx = svd->anon_index; 6453 vnode_t *vp = svd->vp; 6454 struct vpage *vpage = svd->vpage; 6455 page_t *anon_pl[1 + 1], *pp; 6456 struct anon *ap, *oldap; 6457 uint_t prot = svd->prot, vpprot; 6458 int pageflag = 0; 6459 6460 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6461 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6462 ASSERT(svd->softlockcnt == 0); 6463 6464 if (vp == NULL && amp == NULL) { 6465 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6466 seg->s_szc = 0; 6467 return (0); 6468 } 6469 6470 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6471 ASSERT(svd->amp == NULL); 6472 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6473 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6474 HAT_REGION_TEXT); 6475 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6476 } else if (svd->tr_state == SEGVN_TR_ON) { 6477 ASSERT(svd->amp != NULL); 6478 segvn_textunrepl(seg, 1); 6479 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6480 amp = NULL; 6481 } else { 6482 if (svd->tr_state != SEGVN_TR_OFF) { 6483 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6484 svd->tr_state = SEGVN_TR_OFF; 6485 } 6486 6487 /* 6488 * do HAT_UNLOAD_UNMAP since we are changing the pagesize. 6489 * unload argument is 0 when we are freeing the segment 6490 * and unload was already done. 6491 */ 6492 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6493 HAT_UNLOAD_UNMAP); 6494 } 6495 6496 if (amp == NULL || svd->type == MAP_SHARED) { 6497 seg->s_szc = 0; 6498 return (0); 6499 } 6500 6501 pgsz = page_get_pagesize(seg->s_szc); 6502 pages = btop(pgsz); 6503 6504 /* 6505 * XXX anon rwlock is not really needed because this is a 6506 * private segment and we are writers. 6507 */ 6508 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6509 6510 for (; a < ea; a += pgsz, an_idx += pages) { 6511 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6512 ASSERT(vpage != NULL || svd->pageprot == 0); 6513 if (vpage != NULL) { 6514 ASSERT(sameprot(seg, a, pgsz)); 6515 prot = VPP_PROT(vpage); 6516 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6517 } 6518 if (seg->s_szc != 0) { 6519 ASSERT(vp == NULL || anon_pages(amp->ahp, 6520 an_idx, pages) == pages); 6521 if ((err = anon_map_demotepages(amp, an_idx, 6522 seg, a, prot, vpage, svd->cred)) != 0) { 6523 goto out; 6524 } 6525 } else { 6526 if (oldap->an_refcnt == 1) { 6527 continue; 6528 } 6529 if ((err = anon_getpage(&oldap, &vpprot, 6530 anon_pl, PAGESIZE, seg, a, S_READ, 6531 svd->cred))) { 6532 goto out; 6533 } 6534 if ((pp = anon_private(&ap, seg, a, prot, 6535 anon_pl[0], pageflag, svd->cred)) == NULL) { 6536 err = ENOMEM; 6537 goto out; 6538 } 6539 anon_decref(oldap); 6540 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6541 ANON_SLEEP); 6542 page_unlock(pp); 6543 } 6544 } 6545 vpage = (vpage == NULL) ? NULL : vpage + pages; 6546 } 6547 6548 amp->a_szc = 0; 6549 seg->s_szc = 0; 6550 out: 6551 ANON_LOCK_EXIT(&->a_rwlock); 6552 return (err); 6553 } 6554 6555 static int 6556 segvn_claim_pages( 6557 struct seg *seg, 6558 struct vpage *svp, 6559 u_offset_t off, 6560 ulong_t anon_idx, 6561 uint_t prot) 6562 { 6563 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6564 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6565 page_t **ppa; 6566 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6567 struct anon_map *amp = svd->amp; 6568 struct vpage *evp = svp + pgcnt; 6569 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6570 + seg->s_base; 6571 struct anon *ap; 6572 struct vnode *vp = svd->vp; 6573 page_t *pp; 6574 pgcnt_t pg_idx, i; 6575 int err = 0; 6576 anoff_t aoff; 6577 int anon = (amp != NULL) ? 1 : 0; 6578 6579 ASSERT(svd->type == MAP_PRIVATE); 6580 ASSERT(svd->vpage != NULL); 6581 ASSERT(seg->s_szc != 0); 6582 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6583 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6584 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6585 6586 if (VPP_PROT(svp) == prot) 6587 return (1); 6588 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6589 return (1); 6590 6591 ppa = kmem_alloc(ppasize, KM_SLEEP); 6592 if (anon && vp != NULL) { 6593 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6594 anon = 0; 6595 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6596 } 6597 ASSERT(!anon || 6598 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6599 } 6600 6601 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6602 if (!VPP_ISPPLOCK(svp)) 6603 continue; 6604 if (anon) { 6605 ap = anon_get_ptr(amp->ahp, anon_idx); 6606 if (ap == NULL) { 6607 panic("segvn_claim_pages: no anon slot"); 6608 } 6609 swap_xlate(ap, &vp, &aoff); 6610 off = (u_offset_t)aoff; 6611 } 6612 ASSERT(vp != NULL); 6613 if ((pp = page_lookup(vp, 6614 (u_offset_t)off, SE_SHARED)) == NULL) { 6615 panic("segvn_claim_pages: no page"); 6616 } 6617 ppa[pg_idx++] = pp; 6618 off += PAGESIZE; 6619 } 6620 6621 if (ppa[0] == NULL) { 6622 kmem_free(ppa, ppasize); 6623 return (1); 6624 } 6625 6626 ASSERT(pg_idx <= pgcnt); 6627 ppa[pg_idx] = NULL; 6628 6629 if (prot & PROT_WRITE) 6630 err = page_addclaim_pages(ppa); 6631 else 6632 err = page_subclaim_pages(ppa); 6633 6634 for (i = 0; i < pg_idx; i++) { 6635 ASSERT(ppa[i] != NULL); 6636 page_unlock(ppa[i]); 6637 } 6638 6639 kmem_free(ppa, ppasize); 6640 return (err); 6641 } 6642 6643 /* 6644 * Returns right (upper address) segment if split occurred. 6645 * If the address is equal to the beginning or end of its segment it returns 6646 * the current segment. 6647 */ 6648 static struct seg * 6649 segvn_split_seg(struct seg *seg, caddr_t addr) 6650 { 6651 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6652 struct seg *nseg; 6653 size_t nsize; 6654 struct segvn_data *nsvd; 6655 6656 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6657 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6658 6659 ASSERT(addr >= seg->s_base); 6660 ASSERT(addr <= seg->s_base + seg->s_size); 6661 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6662 6663 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6664 return (seg); 6665 6666 nsize = seg->s_base + seg->s_size - addr; 6667 seg->s_size = addr - seg->s_base; 6668 nseg = seg_alloc(seg->s_as, addr, nsize); 6669 ASSERT(nseg != NULL); 6670 nseg->s_ops = seg->s_ops; 6671 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6672 nseg->s_data = (void *)nsvd; 6673 nseg->s_szc = seg->s_szc; 6674 *nsvd = *svd; 6675 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6676 nsvd->seg = nseg; 6677 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6678 6679 if (nsvd->vp != NULL) { 6680 VN_HOLD(nsvd->vp); 6681 nsvd->offset = svd->offset + 6682 (uintptr_t)(nseg->s_base - seg->s_base); 6683 if (nsvd->type == MAP_SHARED) 6684 lgrp_shm_policy_init(NULL, nsvd->vp); 6685 } else { 6686 /* 6687 * The offset for an anonymous segment has no signifigance in 6688 * terms of an offset into a file. If we were to use the above 6689 * calculation instead, the structures read out of 6690 * /proc/<pid>/xmap would be more difficult to decipher since 6691 * it would be unclear whether two seemingly contiguous 6692 * prxmap_t structures represented different segments or a 6693 * single segment that had been split up into multiple prxmap_t 6694 * structures (e.g. if some part of the segment had not yet 6695 * been faulted in). 6696 */ 6697 nsvd->offset = 0; 6698 } 6699 6700 ASSERT(svd->softlockcnt == 0); 6701 crhold(svd->cred); 6702 6703 if (svd->vpage != NULL) { 6704 size_t bytes = vpgtob(seg_pages(seg)); 6705 size_t nbytes = vpgtob(seg_pages(nseg)); 6706 struct vpage *ovpage = svd->vpage; 6707 6708 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6709 bcopy(ovpage, svd->vpage, bytes); 6710 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6711 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6712 kmem_free(ovpage, bytes + nbytes); 6713 } 6714 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6715 struct anon_map *oamp = svd->amp, *namp; 6716 struct anon_hdr *nahp; 6717 6718 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6719 ASSERT(oamp->refcnt == 1); 6720 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6721 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6722 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6723 6724 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6725 namp->a_szc = nseg->s_szc; 6726 (void) anon_copy_ptr(oamp->ahp, 6727 svd->anon_index + btop(seg->s_size), 6728 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6729 anon_release(oamp->ahp, btop(oamp->size)); 6730 oamp->ahp = nahp; 6731 oamp->size = seg->s_size; 6732 svd->anon_index = 0; 6733 nsvd->amp = namp; 6734 nsvd->anon_index = 0; 6735 ANON_LOCK_EXIT(&oamp->a_rwlock); 6736 } else if (svd->amp != NULL) { 6737 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6738 ASSERT(svd->amp == nsvd->amp); 6739 ASSERT(seg->s_szc <= svd->amp->a_szc); 6740 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6741 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6742 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6743 svd->amp->refcnt++; 6744 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6745 } 6746 6747 /* 6748 * Split the amount of swap reserved. 6749 */ 6750 if (svd->swresv) { 6751 /* 6752 * For MAP_NORESERVE, only allocate swap reserve for pages 6753 * being used. Other segments get enough to cover whole 6754 * segment. 6755 */ 6756 if (svd->flags & MAP_NORESERVE) { 6757 size_t oswresv; 6758 6759 ASSERT(svd->amp); 6760 oswresv = svd->swresv; 6761 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6762 svd->anon_index, btop(seg->s_size))); 6763 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6764 nsvd->anon_index, btop(nseg->s_size))); 6765 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6766 } else { 6767 if (svd->pageswap) { 6768 svd->swresv = segvn_count_swap_by_vpages(seg); 6769 ASSERT(nsvd->swresv >= svd->swresv); 6770 nsvd->swresv -= svd->swresv; 6771 } else { 6772 ASSERT(svd->swresv == seg->s_size + 6773 nseg->s_size); 6774 svd->swresv = seg->s_size; 6775 nsvd->swresv = nseg->s_size; 6776 } 6777 } 6778 } 6779 6780 return (nseg); 6781 } 6782 6783 /* 6784 * called on memory operations (unmap, setprot, setpagesize) for a subset 6785 * of a large page segment to either demote the memory range (SDR_RANGE) 6786 * or the ends (SDR_END) by addr/len. 6787 * 6788 * returns 0 on success. returns errno, including ENOMEM, on failure. 6789 */ 6790 static int 6791 segvn_demote_range( 6792 struct seg *seg, 6793 caddr_t addr, 6794 size_t len, 6795 int flag, 6796 uint_t szcvec) 6797 { 6798 caddr_t eaddr = addr + len; 6799 caddr_t lpgaddr, lpgeaddr; 6800 struct seg *nseg; 6801 struct seg *badseg1 = NULL; 6802 struct seg *badseg2 = NULL; 6803 size_t pgsz; 6804 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6805 int err; 6806 uint_t szc = seg->s_szc; 6807 uint_t tszcvec; 6808 6809 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6810 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6811 ASSERT(szc != 0); 6812 pgsz = page_get_pagesize(szc); 6813 ASSERT(seg->s_base != addr || seg->s_size != len); 6814 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6815 ASSERT(svd->softlockcnt == 0); 6816 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6817 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6818 6819 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6820 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6821 if (flag == SDR_RANGE) { 6822 /* demote entire range */ 6823 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6824 (void) segvn_split_seg(nseg, lpgeaddr); 6825 ASSERT(badseg1->s_base == lpgaddr); 6826 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6827 } else if (addr != lpgaddr) { 6828 ASSERT(flag == SDR_END); 6829 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6830 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6831 eaddr < lpgaddr + 2 * pgsz) { 6832 (void) segvn_split_seg(nseg, lpgeaddr); 6833 ASSERT(badseg1->s_base == lpgaddr); 6834 ASSERT(badseg1->s_size == 2 * pgsz); 6835 } else { 6836 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6837 ASSERT(badseg1->s_base == lpgaddr); 6838 ASSERT(badseg1->s_size == pgsz); 6839 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6840 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6841 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6842 badseg2 = nseg; 6843 (void) segvn_split_seg(nseg, lpgeaddr); 6844 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6845 ASSERT(badseg2->s_size == pgsz); 6846 } 6847 } 6848 } else { 6849 ASSERT(flag == SDR_END); 6850 ASSERT(eaddr < lpgeaddr); 6851 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6852 (void) segvn_split_seg(nseg, lpgeaddr); 6853 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6854 ASSERT(badseg1->s_size == pgsz); 6855 } 6856 6857 ASSERT(badseg1 != NULL); 6858 ASSERT(badseg1->s_szc == szc); 6859 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6860 badseg1->s_size == 2 * pgsz); 6861 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6862 ASSERT(badseg1->s_size == pgsz || 6863 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6864 if (err = segvn_clrszc(badseg1)) { 6865 return (err); 6866 } 6867 ASSERT(badseg1->s_szc == 0); 6868 6869 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6870 uint_t tszc = highbit(tszcvec) - 1; 6871 caddr_t ta = MAX(addr, badseg1->s_base); 6872 caddr_t te; 6873 size_t tpgsz = page_get_pagesize(tszc); 6874 6875 ASSERT(svd->type == MAP_SHARED); 6876 ASSERT(flag == SDR_END); 6877 ASSERT(tszc < szc && tszc > 0); 6878 6879 if (eaddr > badseg1->s_base + badseg1->s_size) { 6880 te = badseg1->s_base + badseg1->s_size; 6881 } else { 6882 te = eaddr; 6883 } 6884 6885 ASSERT(ta <= te); 6886 badseg1->s_szc = tszc; 6887 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6888 if (badseg2 != NULL) { 6889 err = segvn_demote_range(badseg1, ta, te - ta, 6890 SDR_END, tszcvec); 6891 if (err != 0) { 6892 return (err); 6893 } 6894 } else { 6895 return (segvn_demote_range(badseg1, ta, 6896 te - ta, SDR_END, tszcvec)); 6897 } 6898 } 6899 } 6900 6901 if (badseg2 == NULL) 6902 return (0); 6903 ASSERT(badseg2->s_szc == szc); 6904 ASSERT(badseg2->s_size == pgsz); 6905 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6906 if (err = segvn_clrszc(badseg2)) { 6907 return (err); 6908 } 6909 ASSERT(badseg2->s_szc == 0); 6910 6911 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6912 uint_t tszc = highbit(tszcvec) - 1; 6913 size_t tpgsz = page_get_pagesize(tszc); 6914 6915 ASSERT(svd->type == MAP_SHARED); 6916 ASSERT(flag == SDR_END); 6917 ASSERT(tszc < szc && tszc > 0); 6918 ASSERT(badseg2->s_base > addr); 6919 ASSERT(eaddr > badseg2->s_base); 6920 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6921 6922 badseg2->s_szc = tszc; 6923 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6924 return (segvn_demote_range(badseg2, badseg2->s_base, 6925 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6926 } 6927 } 6928 6929 return (0); 6930 } 6931 6932 static int 6933 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6934 { 6935 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6936 struct vpage *vp, *evp; 6937 6938 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6939 6940 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6941 /* 6942 * If segment protection can be used, simply check against them. 6943 */ 6944 if (svd->pageprot == 0) { 6945 int err; 6946 6947 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6948 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6949 return (err); 6950 } 6951 6952 /* 6953 * Have to check down to the vpage level. 6954 */ 6955 evp = &svd->vpage[seg_page(seg, addr + len)]; 6956 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6957 if ((VPP_PROT(vp) & prot) != prot) { 6958 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6959 return (EACCES); 6960 } 6961 } 6962 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6963 return (0); 6964 } 6965 6966 static int 6967 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6968 { 6969 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6970 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6971 6972 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6973 6974 if (pgno != 0) { 6975 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6976 if (svd->pageprot == 0) { 6977 do { 6978 protv[--pgno] = svd->prot; 6979 } while (pgno != 0); 6980 } else { 6981 size_t pgoff = seg_page(seg, addr); 6982 6983 do { 6984 pgno--; 6985 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6986 } while (pgno != 0); 6987 } 6988 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6989 } 6990 return (0); 6991 } 6992 6993 static u_offset_t 6994 segvn_getoffset(struct seg *seg, caddr_t addr) 6995 { 6996 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6997 6998 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6999 7000 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 7001 } 7002 7003 /*ARGSUSED*/ 7004 static int 7005 segvn_gettype(struct seg *seg, caddr_t addr) 7006 { 7007 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7008 7009 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7010 7011 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 7012 MAP_INITDATA))); 7013 } 7014 7015 /*ARGSUSED*/ 7016 static int 7017 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 7018 { 7019 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7020 7021 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7022 7023 *vpp = svd->vp; 7024 return (0); 7025 } 7026 7027 /* 7028 * Check to see if it makes sense to do kluster/read ahead to 7029 * addr + delta relative to the mapping at addr. We assume here 7030 * that delta is a signed PAGESIZE'd multiple (which can be negative). 7031 * 7032 * For segvn, we currently "approve" of the action if we are 7033 * still in the segment and it maps from the same vp/off, 7034 * or if the advice stored in segvn_data or vpages allows it. 7035 * Currently, klustering is not allowed only if MADV_RANDOM is set. 7036 */ 7037 static int 7038 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 7039 { 7040 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7041 struct anon *oap, *ap; 7042 ssize_t pd; 7043 size_t page; 7044 struct vnode *vp1, *vp2; 7045 u_offset_t off1, off2; 7046 struct anon_map *amp; 7047 7048 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7049 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 7050 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 7051 7052 if (addr + delta < seg->s_base || 7053 addr + delta >= (seg->s_base + seg->s_size)) 7054 return (-1); /* exceeded segment bounds */ 7055 7056 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 7057 page = seg_page(seg, addr); 7058 7059 /* 7060 * Check to see if either of the pages addr or addr + delta 7061 * have advice set that prevents klustering (if MADV_RANDOM advice 7062 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 7063 * is negative). 7064 */ 7065 if (svd->advice == MADV_RANDOM || 7066 svd->advice == MADV_SEQUENTIAL && delta < 0) 7067 return (-1); 7068 else if (svd->pageadvice && svd->vpage) { 7069 struct vpage *bvpp, *evpp; 7070 7071 bvpp = &svd->vpage[page]; 7072 evpp = &svd->vpage[page + pd]; 7073 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 7074 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 7075 return (-1); 7076 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 7077 VPP_ADVICE(evpp) == MADV_RANDOM) 7078 return (-1); 7079 } 7080 7081 if (svd->type == MAP_SHARED) 7082 return (0); /* shared mapping - all ok */ 7083 7084 if ((amp = svd->amp) == NULL) 7085 return (0); /* off original vnode */ 7086 7087 page += svd->anon_index; 7088 7089 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7090 7091 oap = anon_get_ptr(amp->ahp, page); 7092 ap = anon_get_ptr(amp->ahp, page + pd); 7093 7094 ANON_LOCK_EXIT(&->a_rwlock); 7095 7096 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 7097 return (-1); /* one with and one without an anon */ 7098 } 7099 7100 if (oap == NULL) { /* implies that ap == NULL */ 7101 return (0); /* off original vnode */ 7102 } 7103 7104 /* 7105 * Now we know we have two anon pointers - check to 7106 * see if they happen to be properly allocated. 7107 */ 7108 7109 /* 7110 * XXX We cheat here and don't lock the anon slots. We can't because 7111 * we may have been called from the anon layer which might already 7112 * have locked them. We are holding a refcnt on the slots so they 7113 * can't disappear. The worst that will happen is we'll get the wrong 7114 * names (vp, off) for the slots and make a poor klustering decision. 7115 */ 7116 swap_xlate(ap, &vp1, &off1); 7117 swap_xlate(oap, &vp2, &off2); 7118 7119 7120 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 7121 return (-1); 7122 return (0); 7123 } 7124 7125 /* 7126 * Swap the pages of seg out to secondary storage, returning the 7127 * number of bytes of storage freed. 7128 * 7129 * The basic idea is first to unload all translations and then to call 7130 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 7131 * swap device. Pages to which other segments have mappings will remain 7132 * mapped and won't be swapped. Our caller (as_swapout) has already 7133 * performed the unloading step. 7134 * 7135 * The value returned is intended to correlate well with the process's 7136 * memory requirements. However, there are some caveats: 7137 * 1) When given a shared segment as argument, this routine will 7138 * only succeed in swapping out pages for the last sharer of the 7139 * segment. (Previous callers will only have decremented mapping 7140 * reference counts.) 7141 * 2) We assume that the hat layer maintains a large enough translation 7142 * cache to capture process reference patterns. 7143 */ 7144 static size_t 7145 segvn_swapout(struct seg *seg) 7146 { 7147 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7148 struct anon_map *amp; 7149 pgcnt_t pgcnt = 0; 7150 pgcnt_t npages; 7151 pgcnt_t page; 7152 ulong_t anon_index; 7153 7154 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7155 7156 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7157 /* 7158 * Find pages unmapped by our caller and force them 7159 * out to the virtual swap device. 7160 */ 7161 if ((amp = svd->amp) != NULL) 7162 anon_index = svd->anon_index; 7163 npages = seg->s_size >> PAGESHIFT; 7164 for (page = 0; page < npages; page++) { 7165 page_t *pp; 7166 struct anon *ap; 7167 struct vnode *vp; 7168 u_offset_t off; 7169 anon_sync_obj_t cookie; 7170 7171 /* 7172 * Obtain <vp, off> pair for the page, then look it up. 7173 * 7174 * Note that this code is willing to consider regular 7175 * pages as well as anon pages. Is this appropriate here? 7176 */ 7177 ap = NULL; 7178 if (amp != NULL) { 7179 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7180 if (anon_array_try_enter(amp, anon_index + page, 7181 &cookie)) { 7182 ANON_LOCK_EXIT(&->a_rwlock); 7183 continue; 7184 } 7185 ap = anon_get_ptr(amp->ahp, anon_index + page); 7186 if (ap != NULL) { 7187 swap_xlate(ap, &vp, &off); 7188 } else { 7189 vp = svd->vp; 7190 off = svd->offset + ptob(page); 7191 } 7192 anon_array_exit(&cookie); 7193 ANON_LOCK_EXIT(&->a_rwlock); 7194 } else { 7195 vp = svd->vp; 7196 off = svd->offset + ptob(page); 7197 } 7198 if (vp == NULL) { /* untouched zfod page */ 7199 ASSERT(ap == NULL); 7200 continue; 7201 } 7202 7203 pp = page_lookup_nowait(vp, off, SE_SHARED); 7204 if (pp == NULL) 7205 continue; 7206 7207 7208 /* 7209 * Examine the page to see whether it can be tossed out, 7210 * keeping track of how many we've found. 7211 */ 7212 if (!page_tryupgrade(pp)) { 7213 /* 7214 * If the page has an i/o lock and no mappings, 7215 * it's very likely that the page is being 7216 * written out as a result of klustering. 7217 * Assume this is so and take credit for it here. 7218 */ 7219 if (!page_io_trylock(pp)) { 7220 if (!hat_page_is_mapped(pp)) 7221 pgcnt++; 7222 } else { 7223 page_io_unlock(pp); 7224 } 7225 page_unlock(pp); 7226 continue; 7227 } 7228 ASSERT(!page_iolock_assert(pp)); 7229 7230 7231 /* 7232 * Skip if page is locked or has mappings. 7233 * We don't need the page_struct_lock to look at lckcnt 7234 * and cowcnt because the page is exclusive locked. 7235 */ 7236 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7237 hat_page_is_mapped(pp)) { 7238 page_unlock(pp); 7239 continue; 7240 } 7241 7242 /* 7243 * dispose skips large pages so try to demote first. 7244 */ 7245 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7246 page_unlock(pp); 7247 /* 7248 * XXX should skip the remaining page_t's of this 7249 * large page. 7250 */ 7251 continue; 7252 } 7253 7254 ASSERT(pp->p_szc == 0); 7255 7256 /* 7257 * No longer mapped -- we can toss it out. How 7258 * we do so depends on whether or not it's dirty. 7259 */ 7260 if (hat_ismod(pp) && pp->p_vnode) { 7261 /* 7262 * We must clean the page before it can be 7263 * freed. Setting B_FREE will cause pvn_done 7264 * to free the page when the i/o completes. 7265 * XXX: This also causes it to be accounted 7266 * as a pageout instead of a swap: need 7267 * B_SWAPOUT bit to use instead of B_FREE. 7268 * 7269 * Hold the vnode before releasing the page lock 7270 * to prevent it from being freed and re-used by 7271 * some other thread. 7272 */ 7273 VN_HOLD(vp); 7274 page_unlock(pp); 7275 7276 /* 7277 * Queue all i/o requests for the pageout thread 7278 * to avoid saturating the pageout devices. 7279 */ 7280 if (!queue_io_request(vp, off)) 7281 VN_RELE(vp); 7282 } else { 7283 /* 7284 * The page was clean, free it. 7285 * 7286 * XXX: Can we ever encounter modified pages 7287 * with no associated vnode here? 7288 */ 7289 ASSERT(pp->p_vnode != NULL); 7290 /*LINTED: constant in conditional context*/ 7291 VN_DISPOSE(pp, B_FREE, 0, kcred); 7292 } 7293 7294 /* 7295 * Credit now even if i/o is in progress. 7296 */ 7297 pgcnt++; 7298 } 7299 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7300 7301 /* 7302 * Wakeup pageout to initiate i/o on all queued requests. 7303 */ 7304 cv_signal_pageout(); 7305 return (ptob(pgcnt)); 7306 } 7307 7308 /* 7309 * Synchronize primary storage cache with real object in virtual memory. 7310 * 7311 * XXX - Anonymous pages should not be sync'ed out at all. 7312 */ 7313 static int 7314 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7315 { 7316 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7317 struct vpage *vpp; 7318 page_t *pp; 7319 u_offset_t offset; 7320 struct vnode *vp; 7321 u_offset_t off; 7322 caddr_t eaddr; 7323 int bflags; 7324 int err = 0; 7325 int segtype; 7326 int pageprot; 7327 int prot; 7328 ulong_t anon_index; 7329 struct anon_map *amp; 7330 struct anon *ap; 7331 anon_sync_obj_t cookie; 7332 7333 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7334 7335 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7336 7337 if (svd->softlockcnt > 0) { 7338 /* 7339 * flush all pages from seg cache 7340 * otherwise we may deadlock in swap_putpage 7341 * for B_INVAL page (4175402). 7342 * 7343 * Even if we grab segvn WRITER's lock or segp_slock 7344 * here, there might be another thread which could've 7345 * successfully performed lookup/insert just before 7346 * we acquired the lock here. So, grabbing either 7347 * lock here is of not much use. Until we devise 7348 * a strategy at upper layers to solve the 7349 * synchronization issues completely, we expect 7350 * applications to handle this appropriately. 7351 */ 7352 segvn_purge(seg); 7353 if (svd->softlockcnt > 0) { 7354 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7355 return (EAGAIN); 7356 } 7357 } 7358 7359 vpp = svd->vpage; 7360 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7361 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7362 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7363 7364 if (attr) { 7365 pageprot = attr & ~(SHARED|PRIVATE); 7366 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7367 7368 /* 7369 * We are done if the segment types don't match 7370 * or if we have segment level protections and 7371 * they don't match. 7372 */ 7373 if (svd->type != segtype) { 7374 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7375 return (0); 7376 } 7377 if (vpp == NULL) { 7378 if (svd->prot != pageprot) { 7379 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7380 return (0); 7381 } 7382 prot = svd->prot; 7383 } else 7384 vpp = &svd->vpage[seg_page(seg, addr)]; 7385 7386 } else if (svd->vp && svd->amp == NULL && 7387 (flags & MS_INVALIDATE) == 0) { 7388 7389 /* 7390 * No attributes, no anonymous pages and MS_INVALIDATE flag 7391 * is not on, just use one big request. 7392 */ 7393 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7394 bflags, svd->cred, NULL); 7395 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7396 return (err); 7397 } 7398 7399 if ((amp = svd->amp) != NULL) 7400 anon_index = svd->anon_index + seg_page(seg, addr); 7401 7402 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7403 ap = NULL; 7404 if (amp != NULL) { 7405 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7406 anon_array_enter(amp, anon_index, &cookie); 7407 ap = anon_get_ptr(amp->ahp, anon_index++); 7408 if (ap != NULL) { 7409 swap_xlate(ap, &vp, &off); 7410 } else { 7411 vp = svd->vp; 7412 off = offset; 7413 } 7414 anon_array_exit(&cookie); 7415 ANON_LOCK_EXIT(&->a_rwlock); 7416 } else { 7417 vp = svd->vp; 7418 off = offset; 7419 } 7420 offset += PAGESIZE; 7421 7422 if (vp == NULL) /* untouched zfod page */ 7423 continue; 7424 7425 if (attr) { 7426 if (vpp) { 7427 prot = VPP_PROT(vpp); 7428 vpp++; 7429 } 7430 if (prot != pageprot) { 7431 continue; 7432 } 7433 } 7434 7435 /* 7436 * See if any of these pages are locked -- if so, then we 7437 * will have to truncate an invalidate request at the first 7438 * locked one. We don't need the page_struct_lock to test 7439 * as this is only advisory; even if we acquire it someone 7440 * might race in and lock the page after we unlock and before 7441 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7442 */ 7443 if (flags & MS_INVALIDATE) { 7444 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7445 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7446 page_unlock(pp); 7447 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7448 return (EBUSY); 7449 } 7450 if (ap != NULL && pp->p_szc != 0 && 7451 page_tryupgrade(pp)) { 7452 if (pp->p_lckcnt == 0 && 7453 pp->p_cowcnt == 0) { 7454 /* 7455 * swapfs VN_DISPOSE() won't 7456 * invalidate large pages. 7457 * Attempt to demote. 7458 * XXX can't help it if it 7459 * fails. But for swapfs 7460 * pages it is no big deal. 7461 */ 7462 (void) page_try_demote_pages( 7463 pp); 7464 } 7465 } 7466 page_unlock(pp); 7467 } 7468 } else if (svd->type == MAP_SHARED && amp != NULL) { 7469 /* 7470 * Avoid writing out to disk ISM's large pages 7471 * because segspt_free_pages() relies on NULL an_pvp 7472 * of anon slots of such pages. 7473 */ 7474 7475 ASSERT(svd->vp == NULL); 7476 /* 7477 * swapfs uses page_lookup_nowait if not freeing or 7478 * invalidating and skips a page if 7479 * page_lookup_nowait returns NULL. 7480 */ 7481 pp = page_lookup_nowait(vp, off, SE_SHARED); 7482 if (pp == NULL) { 7483 continue; 7484 } 7485 if (pp->p_szc != 0) { 7486 page_unlock(pp); 7487 continue; 7488 } 7489 7490 /* 7491 * Note ISM pages are created large so (vp, off)'s 7492 * page cannot suddenly become large after we unlock 7493 * pp. 7494 */ 7495 page_unlock(pp); 7496 } 7497 /* 7498 * XXX - Should ultimately try to kluster 7499 * calls to VOP_PUTPAGE() for performance. 7500 */ 7501 VN_HOLD(vp); 7502 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7503 bflags, svd->cred, NULL); 7504 VN_RELE(vp); 7505 if (err) 7506 break; 7507 } 7508 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7509 return (err); 7510 } 7511 7512 /* 7513 * Determine if we have data corresponding to pages in the 7514 * primary storage virtual memory cache (i.e., "in core"). 7515 */ 7516 static size_t 7517 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7518 { 7519 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7520 struct vnode *vp, *avp; 7521 u_offset_t offset, aoffset; 7522 size_t p, ep; 7523 int ret; 7524 struct vpage *vpp; 7525 page_t *pp; 7526 uint_t start; 7527 struct anon_map *amp; /* XXX - for locknest */ 7528 struct anon *ap; 7529 uint_t attr; 7530 anon_sync_obj_t cookie; 7531 7532 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7533 7534 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7535 if (svd->amp == NULL && svd->vp == NULL) { 7536 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7537 bzero(vec, btopr(len)); 7538 return (len); /* no anonymous pages created yet */ 7539 } 7540 7541 p = seg_page(seg, addr); 7542 ep = seg_page(seg, addr + len); 7543 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7544 7545 amp = svd->amp; 7546 for (; p < ep; p++, addr += PAGESIZE) { 7547 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7548 ret = start; 7549 ap = NULL; 7550 avp = NULL; 7551 /* Grab the vnode/offset for the anon slot */ 7552 if (amp != NULL) { 7553 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7554 anon_array_enter(amp, svd->anon_index + p, &cookie); 7555 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7556 if (ap != NULL) { 7557 swap_xlate(ap, &avp, &aoffset); 7558 } 7559 anon_array_exit(&cookie); 7560 ANON_LOCK_EXIT(&->a_rwlock); 7561 } 7562 if ((avp != NULL) && page_exists(avp, aoffset)) { 7563 /* A page exists for the anon slot */ 7564 ret |= SEG_PAGE_INCORE; 7565 7566 /* 7567 * If page is mapped and writable 7568 */ 7569 attr = (uint_t)0; 7570 if ((hat_getattr(seg->s_as->a_hat, addr, 7571 &attr) != -1) && (attr & PROT_WRITE)) { 7572 ret |= SEG_PAGE_ANON; 7573 } 7574 /* 7575 * Don't get page_struct lock for lckcnt and cowcnt, 7576 * since this is purely advisory. 7577 */ 7578 if ((pp = page_lookup_nowait(avp, aoffset, 7579 SE_SHARED)) != NULL) { 7580 if (pp->p_lckcnt) 7581 ret |= SEG_PAGE_SOFTLOCK; 7582 if (pp->p_cowcnt) 7583 ret |= SEG_PAGE_HASCOW; 7584 page_unlock(pp); 7585 } 7586 } 7587 7588 /* Gather vnode statistics */ 7589 vp = svd->vp; 7590 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7591 7592 if (vp != NULL) { 7593 /* 7594 * Try to obtain a "shared" lock on the page 7595 * without blocking. If this fails, determine 7596 * if the page is in memory. 7597 */ 7598 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7599 if ((pp == NULL) && (page_exists(vp, offset))) { 7600 /* Page is incore, and is named */ 7601 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7602 } 7603 /* 7604 * Don't get page_struct lock for lckcnt and cowcnt, 7605 * since this is purely advisory. 7606 */ 7607 if (pp != NULL) { 7608 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7609 if (pp->p_lckcnt) 7610 ret |= SEG_PAGE_SOFTLOCK; 7611 if (pp->p_cowcnt) 7612 ret |= SEG_PAGE_HASCOW; 7613 page_unlock(pp); 7614 } 7615 } 7616 7617 /* Gather virtual page information */ 7618 if (vpp) { 7619 if (VPP_ISPPLOCK(vpp)) 7620 ret |= SEG_PAGE_LOCKED; 7621 vpp++; 7622 } 7623 7624 *vec++ = (char)ret; 7625 } 7626 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7627 return (len); 7628 } 7629 7630 /* 7631 * Statement for p_cowcnts/p_lckcnts. 7632 * 7633 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7634 * irrespective of the following factors or anything else: 7635 * 7636 * (1) anon slots are populated or not 7637 * (2) cow is broken or not 7638 * (3) refcnt on ap is 1 or greater than 1 7639 * 7640 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7641 * and munlock. 7642 * 7643 * 7644 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7645 * 7646 * if vpage has PROT_WRITE 7647 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7648 * else 7649 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7650 * 7651 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7652 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7653 * 7654 * We may also break COW if softlocking on read access in the physio case. 7655 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7656 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7657 * vpage doesn't have PROT_WRITE. 7658 * 7659 * 7660 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7661 * 7662 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7663 * increment p_lckcnt by calling page_subclaim() which takes care of 7664 * availrmem accounting and p_lckcnt overflow. 7665 * 7666 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7667 * increment p_cowcnt by calling page_addclaim() which takes care of 7668 * availrmem availability and p_cowcnt overflow. 7669 */ 7670 7671 /* 7672 * Lock down (or unlock) pages mapped by this segment. 7673 * 7674 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7675 * At fault time they will be relocated into larger pages. 7676 */ 7677 static int 7678 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7679 int attr, int op, ulong_t *lockmap, size_t pos) 7680 { 7681 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7682 struct vpage *vpp; 7683 struct vpage *evp; 7684 page_t *pp; 7685 u_offset_t offset; 7686 u_offset_t off; 7687 int segtype; 7688 int pageprot; 7689 int claim; 7690 struct vnode *vp; 7691 ulong_t anon_index; 7692 struct anon_map *amp; 7693 struct anon *ap; 7694 struct vattr va; 7695 anon_sync_obj_t cookie; 7696 struct kshmid *sp = NULL; 7697 struct proc *p = curproc; 7698 kproject_t *proj = NULL; 7699 int chargeproc = 1; 7700 size_t locked_bytes = 0; 7701 size_t unlocked_bytes = 0; 7702 int err = 0; 7703 7704 /* 7705 * Hold write lock on address space because may split or concatenate 7706 * segments 7707 */ 7708 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7709 7710 /* 7711 * If this is a shm, use shm's project and zone, else use 7712 * project and zone of calling process 7713 */ 7714 7715 /* Determine if this segment backs a sysV shm */ 7716 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7717 ASSERT(svd->type == MAP_SHARED); 7718 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7719 sp = svd->amp->a_sp; 7720 proj = sp->shm_perm.ipc_proj; 7721 chargeproc = 0; 7722 } 7723 7724 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7725 if (attr) { 7726 pageprot = attr & ~(SHARED|PRIVATE); 7727 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7728 7729 /* 7730 * We are done if the segment types don't match 7731 * or if we have segment level protections and 7732 * they don't match. 7733 */ 7734 if (svd->type != segtype) { 7735 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7736 return (0); 7737 } 7738 if (svd->pageprot == 0 && svd->prot != pageprot) { 7739 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7740 return (0); 7741 } 7742 } 7743 7744 if (op == MC_LOCK) { 7745 if (svd->tr_state == SEGVN_TR_INIT) { 7746 svd->tr_state = SEGVN_TR_OFF; 7747 } else if (svd->tr_state == SEGVN_TR_ON) { 7748 ASSERT(svd->amp != NULL); 7749 segvn_textunrepl(seg, 0); 7750 ASSERT(svd->amp == NULL && 7751 svd->tr_state == SEGVN_TR_OFF); 7752 } 7753 } 7754 7755 /* 7756 * If we're locking, then we must create a vpage structure if 7757 * none exists. If we're unlocking, then check to see if there 7758 * is a vpage -- if not, then we could not have locked anything. 7759 */ 7760 7761 if ((vpp = svd->vpage) == NULL) { 7762 if (op == MC_LOCK) 7763 segvn_vpage(seg); 7764 else { 7765 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7766 return (0); 7767 } 7768 } 7769 7770 /* 7771 * The anonymous data vector (i.e., previously 7772 * unreferenced mapping to swap space) can be allocated 7773 * by lazily testing for its existence. 7774 */ 7775 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7776 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7777 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7778 svd->amp->a_szc = seg->s_szc; 7779 } 7780 7781 if ((amp = svd->amp) != NULL) { 7782 anon_index = svd->anon_index + seg_page(seg, addr); 7783 } 7784 7785 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7786 evp = &svd->vpage[seg_page(seg, addr + len)]; 7787 7788 if (sp != NULL) 7789 mutex_enter(&sp->shm_mlock); 7790 7791 /* determine number of unlocked bytes in range for lock operation */ 7792 if (op == MC_LOCK) { 7793 7794 if (sp == NULL) { 7795 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7796 vpp++) { 7797 if (!VPP_ISPPLOCK(vpp)) 7798 unlocked_bytes += PAGESIZE; 7799 } 7800 } else { 7801 ulong_t i_idx, i_edx; 7802 anon_sync_obj_t i_cookie; 7803 struct anon *i_ap; 7804 struct vnode *i_vp; 7805 u_offset_t i_off; 7806 7807 /* Only count sysV pages once for locked memory */ 7808 i_edx = svd->anon_index + seg_page(seg, addr + len); 7809 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7810 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7811 anon_array_enter(amp, i_idx, &i_cookie); 7812 i_ap = anon_get_ptr(amp->ahp, i_idx); 7813 if (i_ap == NULL) { 7814 unlocked_bytes += PAGESIZE; 7815 anon_array_exit(&i_cookie); 7816 continue; 7817 } 7818 swap_xlate(i_ap, &i_vp, &i_off); 7819 anon_array_exit(&i_cookie); 7820 pp = page_lookup(i_vp, i_off, SE_SHARED); 7821 if (pp == NULL) { 7822 unlocked_bytes += PAGESIZE; 7823 continue; 7824 } else if (pp->p_lckcnt == 0) 7825 unlocked_bytes += PAGESIZE; 7826 page_unlock(pp); 7827 } 7828 ANON_LOCK_EXIT(&->a_rwlock); 7829 } 7830 7831 mutex_enter(&p->p_lock); 7832 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7833 chargeproc); 7834 mutex_exit(&p->p_lock); 7835 7836 if (err) { 7837 if (sp != NULL) 7838 mutex_exit(&sp->shm_mlock); 7839 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7840 return (err); 7841 } 7842 } 7843 /* 7844 * Loop over all pages in the range. Process if we're locking and 7845 * page has not already been locked in this mapping; or if we're 7846 * unlocking and the page has been locked. 7847 */ 7848 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7849 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7850 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7851 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7852 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7853 7854 if (amp != NULL) 7855 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7856 /* 7857 * If this isn't a MAP_NORESERVE segment and 7858 * we're locking, allocate anon slots if they 7859 * don't exist. The page is brought in later on. 7860 */ 7861 if (op == MC_LOCK && svd->vp == NULL && 7862 ((svd->flags & MAP_NORESERVE) == 0) && 7863 amp != NULL && 7864 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7865 == NULL)) { 7866 anon_array_enter(amp, anon_index, &cookie); 7867 7868 if ((ap = anon_get_ptr(amp->ahp, 7869 anon_index)) == NULL) { 7870 pp = anon_zero(seg, addr, &ap, 7871 svd->cred); 7872 if (pp == NULL) { 7873 anon_array_exit(&cookie); 7874 ANON_LOCK_EXIT(&->a_rwlock); 7875 err = ENOMEM; 7876 goto out; 7877 } 7878 ASSERT(anon_get_ptr(amp->ahp, 7879 anon_index) == NULL); 7880 (void) anon_set_ptr(amp->ahp, 7881 anon_index, ap, ANON_SLEEP); 7882 page_unlock(pp); 7883 } 7884 anon_array_exit(&cookie); 7885 } 7886 7887 /* 7888 * Get name for page, accounting for 7889 * existence of private copy. 7890 */ 7891 ap = NULL; 7892 if (amp != NULL) { 7893 anon_array_enter(amp, anon_index, &cookie); 7894 ap = anon_get_ptr(amp->ahp, anon_index); 7895 if (ap != NULL) { 7896 swap_xlate(ap, &vp, &off); 7897 } else { 7898 if (svd->vp == NULL && 7899 (svd->flags & MAP_NORESERVE)) { 7900 anon_array_exit(&cookie); 7901 ANON_LOCK_EXIT(&->a_rwlock); 7902 continue; 7903 } 7904 vp = svd->vp; 7905 off = offset; 7906 } 7907 anon_array_exit(&cookie); 7908 ANON_LOCK_EXIT(&->a_rwlock); 7909 } else { 7910 vp = svd->vp; 7911 off = offset; 7912 } 7913 7914 /* 7915 * Get page frame. It's ok if the page is 7916 * not available when we're unlocking, as this 7917 * may simply mean that a page we locked got 7918 * truncated out of existence after we locked it. 7919 * 7920 * Invoke VOP_GETPAGE() to obtain the page struct 7921 * since we may need to read it from disk if its 7922 * been paged out. 7923 */ 7924 if (op != MC_LOCK) 7925 pp = page_lookup(vp, off, SE_SHARED); 7926 else { 7927 page_t *pl[1 + 1]; 7928 int error; 7929 7930 ASSERT(vp != NULL); 7931 7932 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7933 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7934 S_OTHER, svd->cred, NULL); 7935 7936 /* 7937 * If the error is EDEADLK then we must bounce 7938 * up and drop all vm subsystem locks and then 7939 * retry the operation later 7940 * This behavior is a temporary measure because 7941 * ufs/sds logging is badly designed and will 7942 * deadlock if we don't allow this bounce to 7943 * happen. The real solution is to re-design 7944 * the logging code to work properly. See bug 7945 * 4125102 for details of the problem. 7946 */ 7947 if (error == EDEADLK) { 7948 err = error; 7949 goto out; 7950 } 7951 /* 7952 * Quit if we fail to fault in the page. Treat 7953 * the failure as an error, unless the addr 7954 * is mapped beyond the end of a file. 7955 */ 7956 if (error && svd->vp) { 7957 va.va_mask = AT_SIZE; 7958 if (VOP_GETATTR(svd->vp, &va, 0, 7959 svd->cred, NULL) != 0) { 7960 err = EIO; 7961 goto out; 7962 } 7963 if (btopr(va.va_size) >= 7964 btopr(off + 1)) { 7965 err = EIO; 7966 goto out; 7967 } 7968 goto out; 7969 7970 } else if (error) { 7971 err = EIO; 7972 goto out; 7973 } 7974 pp = pl[0]; 7975 ASSERT(pp != NULL); 7976 } 7977 7978 /* 7979 * See Statement at the beginning of this routine. 7980 * 7981 * claim is always set if MAP_PRIVATE and PROT_WRITE 7982 * irrespective of following factors: 7983 * 7984 * (1) anon slots are populated or not 7985 * (2) cow is broken or not 7986 * (3) refcnt on ap is 1 or greater than 1 7987 * 7988 * See 4140683 for details 7989 */ 7990 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7991 (svd->type == MAP_PRIVATE)); 7992 7993 /* 7994 * Perform page-level operation appropriate to 7995 * operation. If locking, undo the SOFTLOCK 7996 * performed to bring the page into memory 7997 * after setting the lock. If unlocking, 7998 * and no page was found, account for the claim 7999 * separately. 8000 */ 8001 if (op == MC_LOCK) { 8002 int ret = 1; /* Assume success */ 8003 8004 ASSERT(!VPP_ISPPLOCK(vpp)); 8005 8006 ret = page_pp_lock(pp, claim, 0); 8007 if (ret == 0) { 8008 /* locking page failed */ 8009 page_unlock(pp); 8010 err = EAGAIN; 8011 goto out; 8012 } 8013 VPP_SETPPLOCK(vpp); 8014 if (sp != NULL) { 8015 if (pp->p_lckcnt == 1) 8016 locked_bytes += PAGESIZE; 8017 } else 8018 locked_bytes += PAGESIZE; 8019 8020 if (lockmap != (ulong_t *)NULL) 8021 BT_SET(lockmap, pos); 8022 8023 page_unlock(pp); 8024 } else { 8025 ASSERT(VPP_ISPPLOCK(vpp)); 8026 if (pp != NULL) { 8027 /* sysV pages should be locked */ 8028 ASSERT(sp == NULL || pp->p_lckcnt > 0); 8029 page_pp_unlock(pp, claim, 0); 8030 if (sp != NULL) { 8031 if (pp->p_lckcnt == 0) 8032 unlocked_bytes 8033 += PAGESIZE; 8034 } else 8035 unlocked_bytes += PAGESIZE; 8036 page_unlock(pp); 8037 } else { 8038 ASSERT(sp == NULL); 8039 unlocked_bytes += PAGESIZE; 8040 } 8041 VPP_CLRPPLOCK(vpp); 8042 } 8043 } 8044 } 8045 out: 8046 if (op == MC_LOCK) { 8047 /* Credit back bytes that did not get locked */ 8048 if ((unlocked_bytes - locked_bytes) > 0) { 8049 if (proj == NULL) 8050 mutex_enter(&p->p_lock); 8051 rctl_decr_locked_mem(p, proj, 8052 (unlocked_bytes - locked_bytes), chargeproc); 8053 if (proj == NULL) 8054 mutex_exit(&p->p_lock); 8055 } 8056 8057 } else { 8058 /* Account bytes that were unlocked */ 8059 if (unlocked_bytes > 0) { 8060 if (proj == NULL) 8061 mutex_enter(&p->p_lock); 8062 rctl_decr_locked_mem(p, proj, unlocked_bytes, 8063 chargeproc); 8064 if (proj == NULL) 8065 mutex_exit(&p->p_lock); 8066 } 8067 } 8068 if (sp != NULL) 8069 mutex_exit(&sp->shm_mlock); 8070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8071 8072 return (err); 8073 } 8074 8075 /* 8076 * Set advice from user for specified pages 8077 * There are 5 types of advice: 8078 * MADV_NORMAL - Normal (default) behavior (whatever that is) 8079 * MADV_RANDOM - Random page references 8080 * do not allow readahead or 'klustering' 8081 * MADV_SEQUENTIAL - Sequential page references 8082 * Pages previous to the one currently being 8083 * accessed (determined by fault) are 'not needed' 8084 * and are freed immediately 8085 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 8086 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 8087 * MADV_FREE - Contents can be discarded 8088 * MADV_ACCESS_DEFAULT- Default access 8089 * MADV_ACCESS_LWP - Next LWP will access heavily 8090 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 8091 */ 8092 static int 8093 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8094 { 8095 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8096 size_t page; 8097 int err = 0; 8098 int already_set; 8099 struct anon_map *amp; 8100 ulong_t anon_index; 8101 struct seg *next; 8102 lgrp_mem_policy_t policy; 8103 struct seg *prev; 8104 struct vnode *vp; 8105 8106 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8107 8108 /* 8109 * In case of MADV_FREE, we won't be modifying any segment private 8110 * data structures; so, we only need to grab READER's lock 8111 */ 8112 if (behav != MADV_FREE) { 8113 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8114 if (svd->tr_state != SEGVN_TR_OFF) { 8115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8116 return (0); 8117 } 8118 } else { 8119 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8120 } 8121 8122 /* 8123 * Large pages are assumed to be only turned on when accesses to the 8124 * segment's address range have spatial and temporal locality. That 8125 * justifies ignoring MADV_SEQUENTIAL for large page segments. 8126 * Also, ignore advice affecting lgroup memory allocation 8127 * if don't need to do lgroup optimizations on this system 8128 */ 8129 8130 if ((behav == MADV_SEQUENTIAL && 8131 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 8132 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 8133 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 8134 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8135 return (0); 8136 } 8137 8138 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 8139 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 8140 /* 8141 * Since we are going to unload hat mappings 8142 * we first have to flush the cache. Otherwise 8143 * this might lead to system panic if another 8144 * thread is doing physio on the range whose 8145 * mappings are unloaded by madvise(3C). 8146 */ 8147 if (svd->softlockcnt > 0) { 8148 /* 8149 * Since we do have the segvn writers lock 8150 * nobody can fill the cache with entries 8151 * belonging to this seg during the purge. 8152 * The flush either succeeds or we still 8153 * have pending I/Os. In the later case, 8154 * madvise(3C) fails. 8155 */ 8156 segvn_purge(seg); 8157 if (svd->softlockcnt > 0) { 8158 /* 8159 * Since madvise(3C) is advisory and 8160 * it's not part of UNIX98, madvise(3C) 8161 * failure here doesn't cause any hardship. 8162 * Note that we don't block in "as" layer. 8163 */ 8164 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8165 return (EAGAIN); 8166 } 8167 } 8168 } 8169 8170 amp = svd->amp; 8171 vp = svd->vp; 8172 if (behav == MADV_FREE) { 8173 /* 8174 * MADV_FREE is not supported for segments with 8175 * underlying object; if anonmap is NULL, anon slots 8176 * are not yet populated and there is nothing for 8177 * us to do. As MADV_FREE is advisory, we don't 8178 * return error in either case. 8179 */ 8180 if (vp != NULL || amp == NULL) { 8181 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8182 return (0); 8183 } 8184 8185 page = seg_page(seg, addr); 8186 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 8187 anon_disclaim(amp, svd->anon_index + page, len); 8188 ANON_LOCK_EXIT(&->a_rwlock); 8189 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8190 return (0); 8191 } 8192 8193 /* 8194 * If advice is to be applied to entire segment, 8195 * use advice field in seg_data structure 8196 * otherwise use appropriate vpage entry. 8197 */ 8198 if ((addr == seg->s_base) && (len == seg->s_size)) { 8199 switch (behav) { 8200 case MADV_ACCESS_LWP: 8201 case MADV_ACCESS_MANY: 8202 case MADV_ACCESS_DEFAULT: 8203 /* 8204 * Set memory allocation policy for this segment 8205 */ 8206 policy = lgrp_madv_to_policy(behav, len, svd->type); 8207 if (svd->type == MAP_SHARED) 8208 already_set = lgrp_shm_policy_set(policy, amp, 8209 svd->anon_index, vp, svd->offset, len); 8210 else { 8211 /* 8212 * For private memory, need writers lock on 8213 * address space because the segment may be 8214 * split or concatenated when changing policy 8215 */ 8216 if (AS_READ_HELD(seg->s_as, 8217 &seg->s_as->a_lock)) { 8218 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8219 return (IE_RETRY); 8220 } 8221 8222 already_set = lgrp_privm_policy_set(policy, 8223 &svd->policy_info, len); 8224 } 8225 8226 /* 8227 * If policy set already and it shouldn't be reapplied, 8228 * don't do anything. 8229 */ 8230 if (already_set && 8231 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8232 break; 8233 8234 /* 8235 * Mark any existing pages in given range for 8236 * migration 8237 */ 8238 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8239 vp, svd->offset, 1); 8240 8241 /* 8242 * If same policy set already or this is a shared 8243 * memory segment, don't need to try to concatenate 8244 * segment with adjacent ones. 8245 */ 8246 if (already_set || svd->type == MAP_SHARED) 8247 break; 8248 8249 /* 8250 * Try to concatenate this segment with previous 8251 * one and next one, since we changed policy for 8252 * this one and it may be compatible with adjacent 8253 * ones now. 8254 */ 8255 prev = AS_SEGPREV(seg->s_as, seg); 8256 next = AS_SEGNEXT(seg->s_as, seg); 8257 8258 if (next && next->s_ops == &segvn_ops && 8259 addr + len == next->s_base) 8260 (void) segvn_concat(seg, next, 1); 8261 8262 if (prev && prev->s_ops == &segvn_ops && 8263 addr == prev->s_base + prev->s_size) { 8264 /* 8265 * Drop lock for private data of current 8266 * segment before concatenating (deleting) it 8267 * and return IE_REATTACH to tell as_ctl() that 8268 * current segment has changed 8269 */ 8270 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8271 if (!segvn_concat(prev, seg, 1)) 8272 err = IE_REATTACH; 8273 8274 return (err); 8275 } 8276 break; 8277 8278 case MADV_SEQUENTIAL: 8279 /* 8280 * unloading mapping guarantees 8281 * detection in segvn_fault 8282 */ 8283 ASSERT(seg->s_szc == 0); 8284 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8285 hat_unload(seg->s_as->a_hat, addr, len, 8286 HAT_UNLOAD); 8287 /* FALLTHROUGH */ 8288 case MADV_NORMAL: 8289 case MADV_RANDOM: 8290 svd->advice = (uchar_t)behav; 8291 svd->pageadvice = 0; 8292 break; 8293 case MADV_WILLNEED: /* handled in memcntl */ 8294 case MADV_DONTNEED: /* handled in memcntl */ 8295 case MADV_FREE: /* handled above */ 8296 break; 8297 default: 8298 err = EINVAL; 8299 } 8300 } else { 8301 caddr_t eaddr; 8302 struct seg *new_seg; 8303 struct segvn_data *new_svd; 8304 u_offset_t off; 8305 caddr_t oldeaddr; 8306 8307 page = seg_page(seg, addr); 8308 8309 segvn_vpage(seg); 8310 8311 switch (behav) { 8312 struct vpage *bvpp, *evpp; 8313 8314 case MADV_ACCESS_LWP: 8315 case MADV_ACCESS_MANY: 8316 case MADV_ACCESS_DEFAULT: 8317 /* 8318 * Set memory allocation policy for portion of this 8319 * segment 8320 */ 8321 8322 /* 8323 * Align address and length of advice to page 8324 * boundaries for large pages 8325 */ 8326 if (seg->s_szc != 0) { 8327 size_t pgsz; 8328 8329 pgsz = page_get_pagesize(seg->s_szc); 8330 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8331 len = P2ROUNDUP(len, pgsz); 8332 } 8333 8334 /* 8335 * Check to see whether policy is set already 8336 */ 8337 policy = lgrp_madv_to_policy(behav, len, svd->type); 8338 8339 anon_index = svd->anon_index + page; 8340 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8341 8342 if (svd->type == MAP_SHARED) 8343 already_set = lgrp_shm_policy_set(policy, amp, 8344 anon_index, vp, off, len); 8345 else 8346 already_set = 8347 (policy == svd->policy_info.mem_policy); 8348 8349 /* 8350 * If policy set already and it shouldn't be reapplied, 8351 * don't do anything. 8352 */ 8353 if (already_set && 8354 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8355 break; 8356 8357 /* 8358 * For private memory, need writers lock on 8359 * address space because the segment may be 8360 * split or concatenated when changing policy 8361 */ 8362 if (svd->type == MAP_PRIVATE && 8363 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8364 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8365 return (IE_RETRY); 8366 } 8367 8368 /* 8369 * Mark any existing pages in given range for 8370 * migration 8371 */ 8372 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8373 vp, svd->offset, 1); 8374 8375 /* 8376 * Don't need to try to split or concatenate 8377 * segments, since policy is same or this is a shared 8378 * memory segment 8379 */ 8380 if (already_set || svd->type == MAP_SHARED) 8381 break; 8382 8383 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8384 ASSERT(svd->amp == NULL); 8385 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8386 ASSERT(svd->softlockcnt == 0); 8387 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8388 HAT_REGION_TEXT); 8389 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8390 } 8391 8392 /* 8393 * Split off new segment if advice only applies to a 8394 * portion of existing segment starting in middle 8395 */ 8396 new_seg = NULL; 8397 eaddr = addr + len; 8398 oldeaddr = seg->s_base + seg->s_size; 8399 if (addr > seg->s_base) { 8400 /* 8401 * Must flush I/O page cache 8402 * before splitting segment 8403 */ 8404 if (svd->softlockcnt > 0) 8405 segvn_purge(seg); 8406 8407 /* 8408 * Split segment and return IE_REATTACH to tell 8409 * as_ctl() that current segment changed 8410 */ 8411 new_seg = segvn_split_seg(seg, addr); 8412 new_svd = (struct segvn_data *)new_seg->s_data; 8413 err = IE_REATTACH; 8414 8415 /* 8416 * If new segment ends where old one 8417 * did, try to concatenate the new 8418 * segment with next one. 8419 */ 8420 if (eaddr == oldeaddr) { 8421 /* 8422 * Set policy for new segment 8423 */ 8424 (void) lgrp_privm_policy_set(policy, 8425 &new_svd->policy_info, 8426 new_seg->s_size); 8427 8428 next = AS_SEGNEXT(new_seg->s_as, 8429 new_seg); 8430 8431 if (next && 8432 next->s_ops == &segvn_ops && 8433 eaddr == next->s_base) 8434 (void) segvn_concat(new_seg, 8435 next, 1); 8436 } 8437 } 8438 8439 /* 8440 * Split off end of existing segment if advice only 8441 * applies to a portion of segment ending before 8442 * end of the existing segment 8443 */ 8444 if (eaddr < oldeaddr) { 8445 /* 8446 * Must flush I/O page cache 8447 * before splitting segment 8448 */ 8449 if (svd->softlockcnt > 0) 8450 segvn_purge(seg); 8451 8452 /* 8453 * If beginning of old segment was already 8454 * split off, use new segment to split end off 8455 * from. 8456 */ 8457 if (new_seg != NULL && new_seg != seg) { 8458 /* 8459 * Split segment 8460 */ 8461 (void) segvn_split_seg(new_seg, eaddr); 8462 8463 /* 8464 * Set policy for new segment 8465 */ 8466 (void) lgrp_privm_policy_set(policy, 8467 &new_svd->policy_info, 8468 new_seg->s_size); 8469 } else { 8470 /* 8471 * Split segment and return IE_REATTACH 8472 * to tell as_ctl() that current 8473 * segment changed 8474 */ 8475 (void) segvn_split_seg(seg, eaddr); 8476 err = IE_REATTACH; 8477 8478 (void) lgrp_privm_policy_set(policy, 8479 &svd->policy_info, seg->s_size); 8480 8481 /* 8482 * If new segment starts where old one 8483 * did, try to concatenate it with 8484 * previous segment. 8485 */ 8486 if (addr == seg->s_base) { 8487 prev = AS_SEGPREV(seg->s_as, 8488 seg); 8489 8490 /* 8491 * Drop lock for private data 8492 * of current segment before 8493 * concatenating (deleting) it 8494 */ 8495 if (prev && 8496 prev->s_ops == 8497 &segvn_ops && 8498 addr == prev->s_base + 8499 prev->s_size) { 8500 SEGVN_LOCK_EXIT( 8501 seg->s_as, 8502 &svd->lock); 8503 (void) segvn_concat( 8504 prev, seg, 1); 8505 return (err); 8506 } 8507 } 8508 } 8509 } 8510 break; 8511 case MADV_SEQUENTIAL: 8512 ASSERT(seg->s_szc == 0); 8513 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8514 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8515 /* FALLTHROUGH */ 8516 case MADV_NORMAL: 8517 case MADV_RANDOM: 8518 bvpp = &svd->vpage[page]; 8519 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8520 for (; bvpp < evpp; bvpp++) 8521 VPP_SETADVICE(bvpp, behav); 8522 svd->advice = MADV_NORMAL; 8523 break; 8524 case MADV_WILLNEED: /* handled in memcntl */ 8525 case MADV_DONTNEED: /* handled in memcntl */ 8526 case MADV_FREE: /* handled above */ 8527 break; 8528 default: 8529 err = EINVAL; 8530 } 8531 } 8532 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8533 return (err); 8534 } 8535 8536 /* 8537 * Create a vpage structure for this seg. 8538 */ 8539 static void 8540 segvn_vpage(struct seg *seg) 8541 { 8542 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8543 struct vpage *vp, *evp; 8544 8545 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8546 8547 /* 8548 * If no vpage structure exists, allocate one. Copy the protections 8549 * and the advice from the segment itself to the individual pages. 8550 */ 8551 if (svd->vpage == NULL) { 8552 svd->pageadvice = 1; 8553 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage), 8554 KM_SLEEP); 8555 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8556 for (vp = svd->vpage; vp < evp; vp++) { 8557 VPP_SETPROT(vp, svd->prot); 8558 VPP_SETADVICE(vp, svd->advice); 8559 } 8560 } 8561 } 8562 8563 /* 8564 * Dump the pages belonging to this segvn segment. 8565 */ 8566 static void 8567 segvn_dump(struct seg *seg) 8568 { 8569 struct segvn_data *svd; 8570 page_t *pp; 8571 struct anon_map *amp; 8572 ulong_t anon_index; 8573 struct vnode *vp; 8574 u_offset_t off, offset; 8575 pfn_t pfn; 8576 pgcnt_t page, npages; 8577 caddr_t addr; 8578 8579 npages = seg_pages(seg); 8580 svd = (struct segvn_data *)seg->s_data; 8581 vp = svd->vp; 8582 off = offset = svd->offset; 8583 addr = seg->s_base; 8584 8585 if ((amp = svd->amp) != NULL) { 8586 anon_index = svd->anon_index; 8587 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 8588 } 8589 8590 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8591 struct anon *ap; 8592 int we_own_it = 0; 8593 8594 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8595 swap_xlate_nopanic(ap, &vp, &off); 8596 } else { 8597 vp = svd->vp; 8598 off = offset; 8599 } 8600 8601 /* 8602 * If pp == NULL, the page either does not exist 8603 * or is exclusively locked. So determine if it 8604 * exists before searching for it. 8605 */ 8606 8607 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8608 we_own_it = 1; 8609 else 8610 pp = page_exists(vp, off); 8611 8612 if (pp) { 8613 pfn = page_pptonum(pp); 8614 dump_addpage(seg->s_as, addr, pfn); 8615 if (we_own_it) 8616 page_unlock(pp); 8617 } 8618 addr += PAGESIZE; 8619 dump_timeleft = dump_timeout; 8620 } 8621 8622 if (amp != NULL) 8623 ANON_LOCK_EXIT(&->a_rwlock); 8624 } 8625 8626 /* 8627 * lock/unlock anon pages over a given range. Return shadow list 8628 */ 8629 static int 8630 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8631 enum lock_type type, enum seg_rw rw) 8632 { 8633 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8634 size_t np, adjustpages = 0, npages = (len >> PAGESHIFT); 8635 ulong_t anon_index; 8636 uint_t protchk; 8637 uint_t error; 8638 struct anon_map *amp; 8639 struct page **pplist, **pl, *pp; 8640 caddr_t a; 8641 size_t page; 8642 caddr_t lpgaddr, lpgeaddr; 8643 pgcnt_t szc0_npages = 0; 8644 8645 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8646 "segvn_pagelock: start seg %p addr %p", seg, addr); 8647 8648 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8649 if (seg->s_szc != 0 && (type == L_PAGELOCK || type == L_PAGEUNLOCK)) { 8650 /* 8651 * We are adjusting the pagelock region to the large page size 8652 * boundary because the unlocked part of a large page cannot 8653 * be freed anyway unless all constituent pages of a large 8654 * page are locked. Therefore this adjustment allows us to 8655 * decrement availrmem by the right value (note we don't want 8656 * to just decrement availrem by the large page size without 8657 * adjusting addr and len because then we may end up 8658 * decrementing availrmem by large page size for every 8659 * constituent page locked by a new as_pagelock call). 8660 * as_pageunlock caller must always match as_pagelock call's 8661 * addr and len. 8662 * 8663 * Note segment's page size cannot change while we are holding 8664 * as lock. And then it cannot change while softlockcnt is 8665 * not 0. This will allow us to correctly recalculate large 8666 * page size region for the matching pageunlock/reclaim call. 8667 * 8668 * for pageunlock *ppp points to the pointer of page_t that 8669 * corresponds to the real unadjusted start address. Similar 8670 * for pagelock *ppp must point to the pointer of page_t that 8671 * corresponds to the real unadjusted start address. 8672 */ 8673 size_t pgsz = page_get_pagesize(seg->s_szc); 8674 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8675 adjustpages = ((uintptr_t)(addr - lpgaddr)) >> PAGESHIFT; 8676 } 8677 8678 if (type == L_PAGEUNLOCK) { 8679 8680 /* 8681 * update hat ref bits for /proc. We need to make sure 8682 * that threads tracing the ref and mod bits of the 8683 * address space get the right data. 8684 * Note: page ref and mod bits are updated at reclaim time 8685 */ 8686 if (seg->s_as->a_vbits) { 8687 for (a = addr; a < addr + len; a += PAGESIZE) { 8688 if (rw == S_WRITE) { 8689 hat_setstat(seg->s_as, a, 8690 PAGESIZE, P_REF | P_MOD); 8691 } else { 8692 hat_setstat(seg->s_as, a, 8693 PAGESIZE, P_REF); 8694 } 8695 } 8696 } 8697 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8698 if (seg->s_szc != 0) { 8699 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8700 seg_pinactive(seg, lpgaddr, lpgeaddr - lpgaddr, 8701 *ppp - adjustpages, rw, segvn_reclaim); 8702 } else { 8703 seg_pinactive(seg, addr, len, *ppp, rw, segvn_reclaim); 8704 } 8705 8706 /* 8707 * If someone is blocked while unmapping, we purge 8708 * segment page cache and thus reclaim pplist synchronously 8709 * without waiting for seg_pasync_thread. This speeds up 8710 * unmapping in cases where munmap(2) is called, while 8711 * raw async i/o is still in progress or where a thread 8712 * exits on data fault in a multithreaded application. 8713 */ 8714 if (AS_ISUNMAPWAIT(seg->s_as) && (svd->softlockcnt > 0)) { 8715 /* 8716 * Even if we grab segvn WRITER's lock or segp_slock 8717 * here, there might be another thread which could've 8718 * successfully performed lookup/insert just before 8719 * we acquired the lock here. So, grabbing either 8720 * lock here is of not much use. Until we devise 8721 * a strategy at upper layers to solve the 8722 * synchronization issues completely, we expect 8723 * applications to handle this appropriately. 8724 */ 8725 segvn_purge(seg); 8726 } 8727 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8728 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8729 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8730 return (0); 8731 } else if (type == L_PAGERECLAIM) { 8732 VM_STAT_COND_ADD(seg->s_szc != 0, segvnvmstats.pagelock[1]); 8733 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8734 (void) segvn_reclaim(seg, addr, len, *ppp, rw); 8735 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8736 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8737 "segvn_pagelock: reclaim seg %p addr %p", seg, addr); 8738 return (0); 8739 } 8740 8741 if (seg->s_szc != 0) { 8742 VM_STAT_ADD(segvnvmstats.pagelock[2]); 8743 addr = lpgaddr; 8744 len = lpgeaddr - lpgaddr; 8745 npages = (len >> PAGESHIFT); 8746 } 8747 8748 /* 8749 * for now we only support pagelock to anon memory. We've to check 8750 * protections for vnode objects and call into the vnode driver. 8751 * That's too much for a fast path. Let the fault entry point handle it. 8752 */ 8753 if (svd->vp != NULL) { 8754 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8755 "segvn_pagelock: mapped vnode seg %p addr %p", seg, addr); 8756 *ppp = NULL; 8757 return (ENOTSUP); 8758 } 8759 8760 /* 8761 * if anonmap is not yet created, let the fault entry point populate it 8762 * with anon ptrs. 8763 */ 8764 if ((amp = svd->amp) == NULL) { 8765 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8766 "segvn_pagelock: anonmap null seg %p addr %p", seg, addr); 8767 *ppp = NULL; 8768 return (EFAULT); 8769 } 8770 8771 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8772 8773 /* 8774 * we acquire segp_slock to prevent duplicate entries 8775 * in seg_pcache 8776 */ 8777 mutex_enter(&svd->segp_slock); 8778 8779 /* 8780 * try to find pages in segment page cache 8781 */ 8782 pplist = seg_plookup(seg, addr, len, rw); 8783 if (pplist != NULL) { 8784 mutex_exit(&svd->segp_slock); 8785 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8786 *ppp = pplist + adjustpages; 8787 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8788 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8789 return (0); 8790 } 8791 8792 if (rw == S_READ) { 8793 protchk = PROT_READ; 8794 } else { 8795 protchk = PROT_WRITE; 8796 } 8797 8798 if (svd->pageprot == 0) { 8799 if ((svd->prot & protchk) == 0) { 8800 mutex_exit(&svd->segp_slock); 8801 error = EFAULT; 8802 goto out; 8803 } 8804 } else { 8805 /* 8806 * check page protections 8807 */ 8808 for (a = addr; a < addr + len; a += PAGESIZE) { 8809 struct vpage *vp; 8810 8811 vp = &svd->vpage[seg_page(seg, a)]; 8812 if ((VPP_PROT(vp) & protchk) == 0) { 8813 mutex_exit(&svd->segp_slock); 8814 error = EFAULT; 8815 goto out; 8816 } 8817 } 8818 } 8819 8820 /* 8821 * Avoid per page overhead of segvn_slock_anonpages() for small 8822 * pages. For large pages segvn_slock_anonpages() only does real 8823 * work once per large page. The tradeoff is that we may decrement 8824 * availrmem more than once for the same page but this is ok 8825 * for small pages. 8826 */ 8827 if (seg->s_szc == 0) { 8828 mutex_enter(&freemem_lock); 8829 if (availrmem < tune.t_minarmem + npages) { 8830 mutex_exit(&freemem_lock); 8831 mutex_exit(&svd->segp_slock); 8832 error = ENOMEM; 8833 goto out; 8834 } 8835 availrmem -= npages; 8836 mutex_exit(&freemem_lock); 8837 } 8838 8839 pplist = kmem_alloc(sizeof (page_t *) * npages, KM_SLEEP); 8840 pl = pplist; 8841 *ppp = pplist + adjustpages; 8842 8843 page = seg_page(seg, addr); 8844 anon_index = svd->anon_index + page; 8845 8846 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 8847 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 8848 struct anon *ap; 8849 struct vnode *vp; 8850 u_offset_t off; 8851 anon_sync_obj_t cookie; 8852 8853 anon_array_enter(amp, anon_index, &cookie); 8854 ap = anon_get_ptr(amp->ahp, anon_index); 8855 if (ap == NULL) { 8856 anon_array_exit(&cookie); 8857 break; 8858 } else { 8859 /* 8860 * We must never use seg_pcache for COW pages 8861 * because we might end up with original page still 8862 * lying in seg_pcache even after private page is 8863 * created. This leads to data corruption as 8864 * aio_write refers to the page still in cache 8865 * while all other accesses refer to the private 8866 * page. 8867 */ 8868 if (ap->an_refcnt != 1) { 8869 anon_array_exit(&cookie); 8870 break; 8871 } 8872 } 8873 swap_xlate(ap, &vp, &off); 8874 anon_array_exit(&cookie); 8875 8876 pp = page_lookup_nowait(vp, off, SE_SHARED); 8877 if (pp == NULL) { 8878 break; 8879 } 8880 if (seg->s_szc != 0 || pp->p_szc != 0) { 8881 if (!segvn_slock_anonpages(pp, a == addr)) { 8882 page_unlock(pp); 8883 break; 8884 } 8885 } else { 8886 szc0_npages++; 8887 } 8888 *pplist++ = pp; 8889 } 8890 ANON_LOCK_EXIT(&->a_rwlock); 8891 8892 ASSERT(npages >= szc0_npages); 8893 8894 if (a >= addr + len) { 8895 mutex_enter(&freemem_lock); 8896 if (seg->s_szc == 0 && npages != szc0_npages) { 8897 ASSERT(svd->type == MAP_SHARED && amp->a_szc > 0); 8898 availrmem += (npages - szc0_npages); 8899 } 8900 svd->softlockcnt += npages; 8901 segvn_pages_locked += npages; 8902 mutex_exit(&freemem_lock); 8903 (void) seg_pinsert(seg, addr, len, pl, rw, SEGP_ASYNC_FLUSH, 8904 segvn_reclaim); 8905 mutex_exit(&svd->segp_slock); 8906 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8907 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 8908 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 8909 return (0); 8910 } 8911 8912 mutex_exit(&svd->segp_slock); 8913 if (seg->s_szc == 0) { 8914 mutex_enter(&freemem_lock); 8915 availrmem += npages; 8916 mutex_exit(&freemem_lock); 8917 } 8918 error = EFAULT; 8919 pplist = pl; 8920 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 8921 while (np > (uint_t)0) { 8922 ASSERT(PAGE_LOCKED(*pplist)); 8923 if (seg->s_szc != 0 || (*pplist)->p_szc != 0) { 8924 segvn_sunlock_anonpages(*pplist, pplist == pl); 8925 } 8926 page_unlock(*pplist); 8927 np--; 8928 pplist++; 8929 } 8930 kmem_free(pl, sizeof (page_t *) * npages); 8931 out: 8932 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8933 *ppp = NULL; 8934 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8935 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 8936 return (error); 8937 } 8938 8939 /* 8940 * purge any cached pages in the I/O page cache 8941 */ 8942 static void 8943 segvn_purge(struct seg *seg) 8944 { 8945 seg_ppurge(seg); 8946 } 8947 8948 static int 8949 segvn_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 8950 enum seg_rw rw) 8951 { 8952 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8953 pgcnt_t np, npages; 8954 struct page **pl; 8955 pgcnt_t szc0_npages = 0; 8956 8957 #ifdef lint 8958 addr = addr; 8959 #endif 8960 8961 npages = np = (len >> PAGESHIFT); 8962 ASSERT(npages); 8963 pl = pplist; 8964 if (seg->s_szc != 0) { 8965 size_t pgsz = page_get_pagesize(seg->s_szc); 8966 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 8967 panic("segvn_reclaim: unaligned addr or len"); 8968 /*NOTREACHED*/ 8969 } 8970 } 8971 8972 ASSERT(svd->vp == NULL && svd->amp != NULL); 8973 8974 while (np > (uint_t)0) { 8975 if (rw == S_WRITE) { 8976 hat_setrefmod(*pplist); 8977 } else { 8978 hat_setref(*pplist); 8979 } 8980 if (seg->s_szc != 0 || (*pplist)->p_szc != 0) { 8981 segvn_sunlock_anonpages(*pplist, pplist == pl); 8982 } else { 8983 szc0_npages++; 8984 } 8985 page_unlock(*pplist); 8986 np--; 8987 pplist++; 8988 } 8989 kmem_free(pl, sizeof (page_t *) * npages); 8990 8991 mutex_enter(&freemem_lock); 8992 segvn_pages_locked -= npages; 8993 svd->softlockcnt -= npages; 8994 if (szc0_npages != 0) { 8995 availrmem += szc0_npages; 8996 } 8997 mutex_exit(&freemem_lock); 8998 if (svd->softlockcnt <= 0) { 8999 if (AS_ISUNMAPWAIT(seg->s_as)) { 9000 mutex_enter(&seg->s_as->a_contents); 9001 if (AS_ISUNMAPWAIT(seg->s_as)) { 9002 AS_CLRUNMAPWAIT(seg->s_as); 9003 cv_broadcast(&seg->s_as->a_cv); 9004 } 9005 mutex_exit(&seg->s_as->a_contents); 9006 } 9007 } 9008 return (0); 9009 } 9010 /* 9011 * get a memory ID for an addr in a given segment 9012 * 9013 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9014 * At fault time they will be relocated into larger pages. 9015 */ 9016 static int 9017 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9018 { 9019 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9020 struct anon *ap = NULL; 9021 ulong_t anon_index; 9022 struct anon_map *amp; 9023 anon_sync_obj_t cookie; 9024 9025 if (svd->type == MAP_PRIVATE) { 9026 memidp->val[0] = (uintptr_t)seg->s_as; 9027 memidp->val[1] = (uintptr_t)addr; 9028 return (0); 9029 } 9030 9031 if (svd->type == MAP_SHARED) { 9032 if (svd->vp) { 9033 memidp->val[0] = (uintptr_t)svd->vp; 9034 memidp->val[1] = (u_longlong_t)svd->offset + 9035 (uintptr_t)(addr - seg->s_base); 9036 return (0); 9037 } else { 9038 9039 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9040 if ((amp = svd->amp) != NULL) { 9041 anon_index = svd->anon_index + 9042 seg_page(seg, addr); 9043 } 9044 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9045 9046 ASSERT(amp != NULL); 9047 9048 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 9049 anon_array_enter(amp, anon_index, &cookie); 9050 ap = anon_get_ptr(amp->ahp, anon_index); 9051 if (ap == NULL) { 9052 page_t *pp; 9053 9054 pp = anon_zero(seg, addr, &ap, svd->cred); 9055 if (pp == NULL) { 9056 anon_array_exit(&cookie); 9057 ANON_LOCK_EXIT(&->a_rwlock); 9058 return (ENOMEM); 9059 } 9060 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9061 == NULL); 9062 (void) anon_set_ptr(amp->ahp, anon_index, 9063 ap, ANON_SLEEP); 9064 page_unlock(pp); 9065 } 9066 9067 anon_array_exit(&cookie); 9068 ANON_LOCK_EXIT(&->a_rwlock); 9069 9070 memidp->val[0] = (uintptr_t)ap; 9071 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9072 return (0); 9073 } 9074 } 9075 return (EINVAL); 9076 } 9077 9078 static int 9079 sameprot(struct seg *seg, caddr_t a, size_t len) 9080 { 9081 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9082 struct vpage *vpage; 9083 spgcnt_t pages = btop(len); 9084 uint_t prot; 9085 9086 if (svd->pageprot == 0) 9087 return (1); 9088 9089 ASSERT(svd->vpage != NULL); 9090 9091 vpage = &svd->vpage[seg_page(seg, a)]; 9092 prot = VPP_PROT(vpage); 9093 vpage++; 9094 pages--; 9095 while (pages-- > 0) { 9096 if (prot != VPP_PROT(vpage)) 9097 return (0); 9098 vpage++; 9099 } 9100 return (1); 9101 } 9102 9103 /* 9104 * Get memory allocation policy info for specified address in given segment 9105 */ 9106 static lgrp_mem_policy_info_t * 9107 segvn_getpolicy(struct seg *seg, caddr_t addr) 9108 { 9109 struct anon_map *amp; 9110 ulong_t anon_index; 9111 lgrp_mem_policy_info_t *policy_info; 9112 struct segvn_data *svn_data; 9113 u_offset_t vn_off; 9114 vnode_t *vp; 9115 9116 ASSERT(seg != NULL); 9117 9118 svn_data = (struct segvn_data *)seg->s_data; 9119 if (svn_data == NULL) 9120 return (NULL); 9121 9122 /* 9123 * Get policy info for private or shared memory 9124 */ 9125 if (svn_data->type != MAP_SHARED) { 9126 if (svn_data->tr_state != SEGVN_TR_ON) { 9127 policy_info = &svn_data->policy_info; 9128 } else { 9129 policy_info = &svn_data->tr_policy_info; 9130 ASSERT(policy_info->mem_policy == 9131 LGRP_MEM_POLICY_NEXT_SEG); 9132 } 9133 } else { 9134 amp = svn_data->amp; 9135 anon_index = svn_data->anon_index + seg_page(seg, addr); 9136 vp = svn_data->vp; 9137 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9138 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9139 } 9140 9141 return (policy_info); 9142 } 9143 9144 /*ARGSUSED*/ 9145 static int 9146 segvn_capable(struct seg *seg, segcapability_t capability) 9147 { 9148 return (0); 9149 } 9150 9151 /* 9152 * Bind text vnode segment to an amp. If we bind successfully mappings will be 9153 * established to per vnode mapping per lgroup amp pages instead of to vnode 9154 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9155 * may share the same text replication amp. If a suitable amp doesn't already 9156 * exist in svntr hash table create a new one. We may fail to bind to amp if 9157 * segment is not eligible for text replication. Code below first checks for 9158 * these conditions. If binding is successful segment tr_state is set to on 9159 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9160 * svd->amp remains as NULL. 9161 */ 9162 static void 9163 segvn_textrepl(struct seg *seg) 9164 { 9165 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9166 vnode_t *vp = svd->vp; 9167 u_offset_t off = svd->offset; 9168 size_t size = seg->s_size; 9169 u_offset_t eoff = off + size; 9170 uint_t szc = seg->s_szc; 9171 ulong_t hash = SVNTR_HASH_FUNC(vp); 9172 svntr_t *svntrp; 9173 struct vattr va; 9174 proc_t *p = seg->s_as->a_proc; 9175 lgrp_id_t lgrp_id; 9176 lgrp_id_t olid; 9177 int first; 9178 struct anon_map *amp; 9179 9180 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9181 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9182 ASSERT(p != NULL); 9183 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9184 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9185 ASSERT(svd->flags & MAP_TEXT); 9186 ASSERT(svd->type == MAP_PRIVATE); 9187 ASSERT(vp != NULL && svd->amp == NULL); 9188 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9189 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9190 ASSERT(seg->s_as != &kas); 9191 ASSERT(off < eoff); 9192 ASSERT(svntr_hashtab != NULL); 9193 9194 /* 9195 * If numa optimizations are no longer desired bail out. 9196 */ 9197 if (!lgrp_optimizations()) { 9198 svd->tr_state = SEGVN_TR_OFF; 9199 return; 9200 } 9201 9202 /* 9203 * Avoid creating anon maps with size bigger than the file size. 9204 * If VOP_GETATTR() call fails bail out. 9205 */ 9206 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9207 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9208 svd->tr_state = SEGVN_TR_OFF; 9209 SEGVN_TR_ADDSTAT(gaerr); 9210 return; 9211 } 9212 if (btopr(va.va_size) < btopr(eoff)) { 9213 svd->tr_state = SEGVN_TR_OFF; 9214 SEGVN_TR_ADDSTAT(overmap); 9215 return; 9216 } 9217 9218 /* 9219 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9220 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9221 * mapping that checks if trcache for this vnode needs to be 9222 * invalidated can't miss us. 9223 */ 9224 if (!(vp->v_flag & VVMEXEC)) { 9225 mutex_enter(&vp->v_lock); 9226 vp->v_flag |= VVMEXEC; 9227 mutex_exit(&vp->v_lock); 9228 } 9229 mutex_enter(&svntr_hashtab[hash].tr_lock); 9230 /* 9231 * Bail out if potentially MAP_SHARED writable mappings exist to this 9232 * vnode. We don't want to use old file contents from existing 9233 * replicas if this mapping was established after the original file 9234 * was changed. 9235 */ 9236 if (vn_is_mapped(vp, V_WRITE)) { 9237 mutex_exit(&svntr_hashtab[hash].tr_lock); 9238 svd->tr_state = SEGVN_TR_OFF; 9239 SEGVN_TR_ADDSTAT(wrcnt); 9240 return; 9241 } 9242 svntrp = svntr_hashtab[hash].tr_head; 9243 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9244 ASSERT(svntrp->tr_refcnt != 0); 9245 if (svntrp->tr_vp != vp) { 9246 continue; 9247 } 9248 9249 /* 9250 * Bail out if the file or its attributes were changed after 9251 * this replication entry was created since we need to use the 9252 * latest file contents. Note that mtime test alone is not 9253 * sufficient because a user can explicitly change mtime via 9254 * utimes(2) interfaces back to the old value after modifiying 9255 * the file contents. To detect this case we also have to test 9256 * ctime which among other things records the time of the last 9257 * mtime change by utimes(2). ctime is not changed when the file 9258 * is only read or executed so we expect that typically existing 9259 * replication amp's can be used most of the time. 9260 */ 9261 if (!svntrp->tr_valid || 9262 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9263 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9264 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9265 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9266 mutex_exit(&svntr_hashtab[hash].tr_lock); 9267 svd->tr_state = SEGVN_TR_OFF; 9268 SEGVN_TR_ADDSTAT(stale); 9269 return; 9270 } 9271 /* 9272 * if off, eoff and szc match current segment we found the 9273 * existing entry we can use. 9274 */ 9275 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9276 svntrp->tr_szc == szc) { 9277 break; 9278 } 9279 /* 9280 * Don't create different but overlapping in file offsets 9281 * entries to avoid replication of the same file pages more 9282 * than once per lgroup. 9283 */ 9284 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9285 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9286 mutex_exit(&svntr_hashtab[hash].tr_lock); 9287 svd->tr_state = SEGVN_TR_OFF; 9288 SEGVN_TR_ADDSTAT(overlap); 9289 return; 9290 } 9291 } 9292 /* 9293 * If we didn't find existing entry create a new one. 9294 */ 9295 if (svntrp == NULL) { 9296 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9297 if (svntrp == NULL) { 9298 mutex_exit(&svntr_hashtab[hash].tr_lock); 9299 svd->tr_state = SEGVN_TR_OFF; 9300 SEGVN_TR_ADDSTAT(nokmem); 9301 return; 9302 } 9303 #ifdef DEBUG 9304 { 9305 lgrp_id_t i; 9306 for (i = 0; i < NLGRPS_MAX; i++) { 9307 ASSERT(svntrp->tr_amp[i] == NULL); 9308 } 9309 } 9310 #endif /* DEBUG */ 9311 svntrp->tr_vp = vp; 9312 svntrp->tr_off = off; 9313 svntrp->tr_eoff = eoff; 9314 svntrp->tr_szc = szc; 9315 svntrp->tr_valid = 1; 9316 svntrp->tr_mtime = va.va_mtime; 9317 svntrp->tr_ctime = va.va_ctime; 9318 svntrp->tr_refcnt = 0; 9319 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9320 svntr_hashtab[hash].tr_head = svntrp; 9321 } 9322 first = 1; 9323 again: 9324 /* 9325 * We want to pick a replica with pages on main thread's (t_tid = 1, 9326 * aka T1) lgrp. Currently text replication is only optimized for 9327 * workloads that either have all threads of a process on the same 9328 * lgrp or execute their large text primarily on main thread. 9329 */ 9330 lgrp_id = p->p_t1_lgrpid; 9331 if (lgrp_id == LGRP_NONE) { 9332 /* 9333 * In case exec() prefaults text on non main thread use 9334 * current thread lgrpid. It will become main thread anyway 9335 * soon. 9336 */ 9337 lgrp_id = lgrp_home_id(curthread); 9338 } 9339 /* 9340 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise 9341 * just set it to NLGRPS_MAX if it's different from current process T1 9342 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9343 * replication and T1 new home is different from lgrp used for text 9344 * replication. When this happens asyncronous segvn thread rechecks if 9345 * segments should change lgrps used for text replication. If we fail 9346 * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas 9347 * if it's not already NLGRPS_MAX and not equal lgrp_id we want to 9348 * use. We don't need to use cas in this case because another thread 9349 * that races in between our non atomic check and set may only change 9350 * p_tr_lgrpid to NLGRPS_MAX at this point. 9351 */ 9352 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9353 olid = p->p_tr_lgrpid; 9354 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9355 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9356 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) { 9357 olid = p->p_tr_lgrpid; 9358 ASSERT(olid != LGRP_NONE); 9359 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9360 p->p_tr_lgrpid = NLGRPS_MAX; 9361 } 9362 } 9363 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9364 membar_producer(); 9365 /* 9366 * lgrp_move_thread() won't schedule async recheck after 9367 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9368 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9369 * is not LGRP_NONE. 9370 */ 9371 if (first && p->p_t1_lgrpid != LGRP_NONE && 9372 p->p_t1_lgrpid != lgrp_id) { 9373 first = 0; 9374 goto again; 9375 } 9376 } 9377 /* 9378 * If no amp was created yet for lgrp_id create a new one as long as 9379 * we have enough memory to afford it. 9380 */ 9381 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9382 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9383 if (trmem > segvn_textrepl_max_bytes) { 9384 SEGVN_TR_ADDSTAT(normem); 9385 goto fail; 9386 } 9387 if (anon_try_resv_zone(size, NULL) == 0) { 9388 SEGVN_TR_ADDSTAT(noanon); 9389 goto fail; 9390 } 9391 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9392 if (amp == NULL) { 9393 anon_unresv_zone(size, NULL); 9394 SEGVN_TR_ADDSTAT(nokmem); 9395 goto fail; 9396 } 9397 ASSERT(amp->refcnt == 1); 9398 amp->a_szc = szc; 9399 svntrp->tr_amp[lgrp_id] = amp; 9400 SEGVN_TR_ADDSTAT(newamp); 9401 } 9402 svntrp->tr_refcnt++; 9403 ASSERT(svd->svn_trnext == NULL); 9404 ASSERT(svd->svn_trprev == NULL); 9405 svd->svn_trnext = svntrp->tr_svnhead; 9406 svd->svn_trprev = NULL; 9407 if (svntrp->tr_svnhead != NULL) { 9408 svntrp->tr_svnhead->svn_trprev = svd; 9409 } 9410 svntrp->tr_svnhead = svd; 9411 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9412 ASSERT(amp->refcnt >= 1); 9413 svd->amp = amp; 9414 svd->anon_index = 0; 9415 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9416 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9417 svd->tr_state = SEGVN_TR_ON; 9418 mutex_exit(&svntr_hashtab[hash].tr_lock); 9419 SEGVN_TR_ADDSTAT(repl); 9420 return; 9421 fail: 9422 ASSERT(segvn_textrepl_bytes >= size); 9423 atomic_add_long(&segvn_textrepl_bytes, -size); 9424 ASSERT(svntrp != NULL); 9425 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9426 if (svntrp->tr_refcnt == 0) { 9427 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9428 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9429 mutex_exit(&svntr_hashtab[hash].tr_lock); 9430 kmem_cache_free(svntr_cache, svntrp); 9431 } else { 9432 mutex_exit(&svntr_hashtab[hash].tr_lock); 9433 } 9434 svd->tr_state = SEGVN_TR_OFF; 9435 } 9436 9437 /* 9438 * Convert seg back to regular vnode mapping seg by unbinding it from its text 9439 * replication amp. This routine is most typically called when segment is 9440 * unmapped but can also be called when segment no longer qualifies for text 9441 * replication (e.g. due to protection changes). If unload_unmap is set use 9442 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of 9443 * svntr free all its anon maps and remove it from the hash table. 9444 */ 9445 static void 9446 segvn_textunrepl(struct seg *seg, int unload_unmap) 9447 { 9448 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9449 vnode_t *vp = svd->vp; 9450 u_offset_t off = svd->offset; 9451 size_t size = seg->s_size; 9452 u_offset_t eoff = off + size; 9453 uint_t szc = seg->s_szc; 9454 ulong_t hash = SVNTR_HASH_FUNC(vp); 9455 svntr_t *svntrp; 9456 svntr_t **prv_svntrp; 9457 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid; 9458 lgrp_id_t i; 9459 9460 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9461 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 9462 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9463 ASSERT(svd->tr_state == SEGVN_TR_ON); 9464 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9465 ASSERT(svd->amp != NULL); 9466 ASSERT(svd->amp->refcnt >= 1); 9467 ASSERT(svd->anon_index == 0); 9468 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9469 ASSERT(svntr_hashtab != NULL); 9470 9471 mutex_enter(&svntr_hashtab[hash].tr_lock); 9472 prv_svntrp = &svntr_hashtab[hash].tr_head; 9473 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) { 9474 ASSERT(svntrp->tr_refcnt != 0); 9475 if (svntrp->tr_vp == vp && svntrp->tr_off == off && 9476 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) { 9477 break; 9478 } 9479 } 9480 if (svntrp == NULL) { 9481 panic("segvn_textunrepl: svntr record not found"); 9482 } 9483 if (svntrp->tr_amp[lgrp_id] != svd->amp) { 9484 panic("segvn_textunrepl: amp mismatch"); 9485 } 9486 svd->tr_state = SEGVN_TR_OFF; 9487 svd->amp = NULL; 9488 if (svd->svn_trprev == NULL) { 9489 ASSERT(svntrp->tr_svnhead == svd); 9490 svntrp->tr_svnhead = svd->svn_trnext; 9491 if (svntrp->tr_svnhead != NULL) { 9492 svntrp->tr_svnhead->svn_trprev = NULL; 9493 } 9494 svd->svn_trnext = NULL; 9495 } else { 9496 svd->svn_trprev->svn_trnext = svd->svn_trnext; 9497 if (svd->svn_trnext != NULL) { 9498 svd->svn_trnext->svn_trprev = svd->svn_trprev; 9499 svd->svn_trnext = NULL; 9500 } 9501 svd->svn_trprev = NULL; 9502 } 9503 if (--svntrp->tr_refcnt) { 9504 mutex_exit(&svntr_hashtab[hash].tr_lock); 9505 goto done; 9506 } 9507 *prv_svntrp = svntrp->tr_next; 9508 mutex_exit(&svntr_hashtab[hash].tr_lock); 9509 for (i = 0; i < NLGRPS_MAX; i++) { 9510 struct anon_map *amp = svntrp->tr_amp[i]; 9511 if (amp == NULL) { 9512 continue; 9513 } 9514 ASSERT(amp->refcnt == 1); 9515 ASSERT(amp->swresv == size); 9516 ASSERT(amp->size == size); 9517 ASSERT(amp->a_szc == szc); 9518 if (amp->a_szc != 0) { 9519 anon_free_pages(amp->ahp, 0, size, szc); 9520 } else { 9521 anon_free(amp->ahp, 0, size); 9522 } 9523 svntrp->tr_amp[i] = NULL; 9524 ASSERT(segvn_textrepl_bytes >= size); 9525 atomic_add_long(&segvn_textrepl_bytes, -size); 9526 anon_unresv_zone(amp->swresv, NULL); 9527 amp->refcnt = 0; 9528 anonmap_free(amp); 9529 } 9530 kmem_cache_free(svntr_cache, svntrp); 9531 done: 9532 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size, 9533 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL); 9534 } 9535 9536 /* 9537 * This is called when a MAP_SHARED writable mapping is created to a vnode 9538 * that is currently used for execution (VVMEXEC flag is set). In this case we 9539 * need to prevent further use of existing replicas. 9540 */ 9541 static void 9542 segvn_inval_trcache(vnode_t *vp) 9543 { 9544 ulong_t hash = SVNTR_HASH_FUNC(vp); 9545 svntr_t *svntrp; 9546 9547 ASSERT(vp->v_flag & VVMEXEC); 9548 9549 if (svntr_hashtab == NULL) { 9550 return; 9551 } 9552 9553 mutex_enter(&svntr_hashtab[hash].tr_lock); 9554 svntrp = svntr_hashtab[hash].tr_head; 9555 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9556 ASSERT(svntrp->tr_refcnt != 0); 9557 if (svntrp->tr_vp == vp && svntrp->tr_valid) { 9558 svntrp->tr_valid = 0; 9559 } 9560 } 9561 mutex_exit(&svntr_hashtab[hash].tr_lock); 9562 } 9563 9564 static void 9565 segvn_trasync_thread(void) 9566 { 9567 callb_cpr_t cpr_info; 9568 kmutex_t cpr_lock; /* just for CPR stuff */ 9569 9570 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL); 9571 9572 CALLB_CPR_INIT(&cpr_info, &cpr_lock, 9573 callb_generic_cpr, "segvn_async"); 9574 9575 if (segvn_update_textrepl_interval == 0) { 9576 segvn_update_textrepl_interval = segvn_update_tr_time * hz; 9577 } else { 9578 segvn_update_textrepl_interval *= hz; 9579 } 9580 (void) timeout(segvn_trupdate_wakeup, NULL, 9581 segvn_update_textrepl_interval); 9582 9583 for (;;) { 9584 mutex_enter(&cpr_lock); 9585 CALLB_CPR_SAFE_BEGIN(&cpr_info); 9586 mutex_exit(&cpr_lock); 9587 sema_p(&segvn_trasync_sem); 9588 mutex_enter(&cpr_lock); 9589 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 9590 mutex_exit(&cpr_lock); 9591 segvn_trupdate(); 9592 } 9593 } 9594 9595 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0; 9596 9597 static void 9598 segvn_trupdate_wakeup(void *dummy) 9599 { 9600 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations(); 9601 9602 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) { 9603 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs; 9604 sema_v(&segvn_trasync_sem); 9605 } 9606 9607 if (!segvn_disable_textrepl_update && 9608 segvn_update_textrepl_interval != 0) { 9609 (void) timeout(segvn_trupdate_wakeup, dummy, 9610 segvn_update_textrepl_interval); 9611 } 9612 } 9613 9614 static void 9615 segvn_trupdate(void) 9616 { 9617 ulong_t hash; 9618 svntr_t *svntrp; 9619 segvn_data_t *svd; 9620 9621 ASSERT(svntr_hashtab != NULL); 9622 9623 for (hash = 0; hash < svntr_hashtab_sz; hash++) { 9624 mutex_enter(&svntr_hashtab[hash].tr_lock); 9625 svntrp = svntr_hashtab[hash].tr_head; 9626 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9627 ASSERT(svntrp->tr_refcnt != 0); 9628 svd = svntrp->tr_svnhead; 9629 for (; svd != NULL; svd = svd->svn_trnext) { 9630 segvn_trupdate_seg(svd->seg, svd, svntrp, 9631 hash); 9632 } 9633 } 9634 mutex_exit(&svntr_hashtab[hash].tr_lock); 9635 } 9636 } 9637 9638 static void 9639 segvn_trupdate_seg(struct seg *seg, 9640 segvn_data_t *svd, 9641 svntr_t *svntrp, 9642 ulong_t hash) 9643 { 9644 proc_t *p; 9645 lgrp_id_t lgrp_id; 9646 struct as *as; 9647 size_t size; 9648 struct anon_map *amp; 9649 9650 ASSERT(svd->vp != NULL); 9651 ASSERT(svd->vp == svntrp->tr_vp); 9652 ASSERT(svd->offset == svntrp->tr_off); 9653 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff); 9654 ASSERT(seg != NULL); 9655 ASSERT(svd->seg == seg); 9656 ASSERT(seg->s_data == (void *)svd); 9657 ASSERT(seg->s_szc == svntrp->tr_szc); 9658 ASSERT(svd->tr_state == SEGVN_TR_ON); 9659 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9660 ASSERT(svd->amp != NULL); 9661 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 9662 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE); 9663 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX); 9664 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp); 9665 ASSERT(svntrp->tr_refcnt != 0); 9666 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock)); 9667 9668 as = seg->s_as; 9669 ASSERT(as != NULL && as != &kas); 9670 p = as->a_proc; 9671 ASSERT(p != NULL); 9672 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9673 lgrp_id = p->p_t1_lgrpid; 9674 if (lgrp_id == LGRP_NONE) { 9675 return; 9676 } 9677 ASSERT(lgrp_id < NLGRPS_MAX); 9678 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) { 9679 return; 9680 } 9681 9682 /* 9683 * Use tryenter locking since we are locking as/seg and svntr hash 9684 * lock in reverse from syncrounous thread order. 9685 */ 9686 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) { 9687 SEGVN_TR_ADDSTAT(nolock); 9688 if (segvn_lgrp_trthr_migrs_snpsht) { 9689 segvn_lgrp_trthr_migrs_snpsht = 0; 9690 } 9691 return; 9692 } 9693 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) { 9694 AS_LOCK_EXIT(as, &as->a_lock); 9695 SEGVN_TR_ADDSTAT(nolock); 9696 if (segvn_lgrp_trthr_migrs_snpsht) { 9697 segvn_lgrp_trthr_migrs_snpsht = 0; 9698 } 9699 return; 9700 } 9701 size = seg->s_size; 9702 if (svntrp->tr_amp[lgrp_id] == NULL) { 9703 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9704 if (trmem > segvn_textrepl_max_bytes) { 9705 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9706 AS_LOCK_EXIT(as, &as->a_lock); 9707 atomic_add_long(&segvn_textrepl_bytes, -size); 9708 SEGVN_TR_ADDSTAT(normem); 9709 return; 9710 } 9711 if (anon_try_resv_zone(size, NULL) == 0) { 9712 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9713 AS_LOCK_EXIT(as, &as->a_lock); 9714 atomic_add_long(&segvn_textrepl_bytes, -size); 9715 SEGVN_TR_ADDSTAT(noanon); 9716 return; 9717 } 9718 amp = anonmap_alloc(size, size, KM_NOSLEEP); 9719 if (amp == NULL) { 9720 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9721 AS_LOCK_EXIT(as, &as->a_lock); 9722 atomic_add_long(&segvn_textrepl_bytes, -size); 9723 anon_unresv_zone(size, NULL); 9724 SEGVN_TR_ADDSTAT(nokmem); 9725 return; 9726 } 9727 ASSERT(amp->refcnt == 1); 9728 amp->a_szc = seg->s_szc; 9729 svntrp->tr_amp[lgrp_id] = amp; 9730 } 9731 /* 9732 * We don't need to drop the bucket lock but here we give other 9733 * threads a chance. svntr and svd can't be unlinked as long as 9734 * segment lock is held as a writer and AS held as well. After we 9735 * retake bucket lock we'll continue from where we left. We'll be able 9736 * to reach the end of either list since new entries are always added 9737 * to the beginning of the lists. 9738 */ 9739 mutex_exit(&svntr_hashtab[hash].tr_lock); 9740 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL); 9741 mutex_enter(&svntr_hashtab[hash].tr_lock); 9742 9743 ASSERT(svd->tr_state == SEGVN_TR_ON); 9744 ASSERT(svd->amp != NULL); 9745 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 9746 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id); 9747 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]); 9748 9749 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9750 svd->amp = svntrp->tr_amp[lgrp_id]; 9751 p->p_tr_lgrpid = NLGRPS_MAX; 9752 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9753 AS_LOCK_EXIT(as, &as->a_lock); 9754 9755 ASSERT(svntrp->tr_refcnt != 0); 9756 ASSERT(svd->vp == svntrp->tr_vp); 9757 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id); 9758 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]); 9759 ASSERT(svd->seg == seg); 9760 ASSERT(svd->tr_state == SEGVN_TR_ON); 9761 9762 SEGVN_TR_ADDSTAT(asyncrepl); 9763 } 9764