1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - shared or copy-on-write from a vnode/anonymous memory. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/param.h> 47 #include <sys/t_lock.h> 48 #include <sys/errno.h> 49 #include <sys/systm.h> 50 #include <sys/mman.h> 51 #include <sys/debug.h> 52 #include <sys/cred.h> 53 #include <sys/vmsystm.h> 54 #include <sys/tuneable.h> 55 #include <sys/bitmap.h> 56 #include <sys/swap.h> 57 #include <sys/kmem.h> 58 #include <sys/sysmacros.h> 59 #include <sys/vtrace.h> 60 #include <sys/cmn_err.h> 61 #include <sys/callb.h> 62 #include <sys/vm.h> 63 #include <sys/dumphdr.h> 64 #include <sys/lgrp.h> 65 66 #include <vm/hat.h> 67 #include <vm/as.h> 68 #include <vm/seg.h> 69 #include <vm/seg_vn.h> 70 #include <vm/pvn.h> 71 #include <vm/anon.h> 72 #include <vm/page.h> 73 #include <vm/vpage.h> 74 #include <sys/proc.h> 75 #include <sys/task.h> 76 #include <sys/project.h> 77 #include <sys/zone.h> 78 #include <sys/shm_impl.h> 79 /* 80 * Private seg op routines. 
81 */ 82 static int segvn_dup(struct seg *seg, struct seg *newseg); 83 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 84 static void segvn_free(struct seg *seg); 85 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 86 caddr_t addr, size_t len, enum fault_type type, 87 enum seg_rw rw); 88 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 89 static int segvn_setprot(struct seg *seg, caddr_t addr, 90 size_t len, uint_t prot); 91 static int segvn_checkprot(struct seg *seg, caddr_t addr, 92 size_t len, uint_t prot); 93 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 94 static size_t segvn_swapout(struct seg *seg); 95 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 96 int attr, uint_t flags); 97 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 98 char *vec); 99 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 100 int attr, int op, ulong_t *lockmap, size_t pos); 101 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 102 uint_t *protv); 103 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 104 static int segvn_gettype(struct seg *seg, caddr_t addr); 105 static int segvn_getvp(struct seg *seg, caddr_t addr, 106 struct vnode **vpp); 107 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 108 uint_t behav); 109 static void segvn_dump(struct seg *seg); 110 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 111 struct page ***ppp, enum lock_type type, enum seg_rw rw); 112 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 113 uint_t szc); 114 static int segvn_getmemid(struct seg *seg, caddr_t addr, 115 memid_t *memidp); 116 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 117 static int segvn_capable(struct seg *seg, segcapability_t capable); 118 119 struct seg_ops segvn_ops = { 120 segvn_dup, 121 segvn_unmap, 122 segvn_free, 123 segvn_fault, 124 segvn_faulta, 125 segvn_setprot, 126 segvn_checkprot, 127 segvn_kluster, 128 segvn_swapout, 129 segvn_sync, 130 segvn_incore, 131 segvn_lockop, 132 segvn_getprot, 133 segvn_getoffset, 134 segvn_gettype, 135 segvn_getvp, 136 segvn_advise, 137 segvn_dump, 138 segvn_pagelock, 139 segvn_setpagesize, 140 segvn_getmemid, 141 segvn_getpolicy, 142 segvn_capable, 143 }; 144 145 /* 146 * Common zfod structures, provided as a shorthand for others to use. 
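 *
 * For illustration only (this shows how callers elsewhere in the kernel
 * typically use these argument blocks; it is not an interface defined in
 * this file): an anonymous zero-fill-on-demand mapping is normally created
 * by handing one of them to as_map(), e.g.
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);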
147 */ 148 static segvn_crargs_t zfod_segvn_crargs = 149 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 150 static segvn_crargs_t kzfod_segvn_crargs = 151 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 152 PROT_ALL & ~PROT_USER); 153 static segvn_crargs_t stack_noexec_crargs = 154 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 155 156 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 157 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 158 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 159 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 160 161 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 162 163 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 164 165 static int segvn_concat(struct seg *, struct seg *, int); 166 static int segvn_extend_prev(struct seg *, struct seg *, 167 struct segvn_crargs *, size_t); 168 static int segvn_extend_next(struct seg *, struct seg *, 169 struct segvn_crargs *, size_t); 170 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 171 static void segvn_pagelist_rele(page_t **); 172 static void segvn_setvnode_mpss(vnode_t *); 173 static void segvn_relocate_pages(page_t **, page_t *); 174 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 175 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 176 uint_t, page_t **, page_t **, uint_t *, int *); 177 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 178 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 179 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 180 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 181 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 182 u_offset_t, struct vpage *, page_t **, uint_t, 183 enum fault_type, enum seg_rw, int, int); 184 static void segvn_vpage(struct seg *); 185 186 static void segvn_purge(struct seg *seg); 187 static int segvn_reclaim(struct seg *, caddr_t, size_t, struct page **, 188 enum seg_rw); 189 190 static int sameprot(struct seg *, caddr_t, size_t); 191 192 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 193 static int segvn_clrszc(struct seg *); 194 static struct seg *segvn_split_seg(struct seg *, caddr_t); 195 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 196 ulong_t, uint_t); 197 198 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 199 size_t, void *, u_offset_t); 200 201 static int segvn_slock_anonpages(page_t *, int); 202 static void segvn_sunlock_anonpages(page_t *, int); 203 204 static struct kmem_cache *segvn_cache; 205 206 #ifdef VM_STATS 207 static struct segvnvmstats_str { 208 ulong_t fill_vp_pages[31]; 209 ulong_t fltvnpages[49]; 210 ulong_t fullszcpages[10]; 211 ulong_t relocatepages[3]; 212 ulong_t fltanpages[17]; 213 ulong_t pagelock[3]; 214 ulong_t demoterange[3]; 215 } segvnvmstats; 216 #endif /* VM_STATS */ 217 218 #define SDR_RANGE 1 /* demote entire range */ 219 #define SDR_END 2 /* demote non aligned ends only */ 220 221 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 222 if ((len) != 0) { \ 223 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 224 ASSERT(lpgaddr >= (seg)->s_base); \ 225 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 226 (len)), pgsz); \ 227 ASSERT(lpgeaddr > lpgaddr); \ 228 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 229 
	} else {						\
		lpgeaddr = lpgaddr = (addr);			\
	}							\
}

/*ARGSUSED*/
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segp_slock, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

/*ARGSUSED1*/
static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segp_slock);
}

/*ARGSUSED*/
static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}

/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that this disables is those that
 * have been transformed into malicious agents using one of the numerous
 * "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;

int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;

/*
 * Segvn supports a text replication optimization for NUMA platforms.  Text
 * replicas are represented by anon maps (amps).  There's one amp per text
 * file region per lgroup.  A process chooses the amp for each of its text
 * mappings based on the lgroup assignment of its main thread (t_tid = 1).
 * All processes that want a replica on a particular lgroup for the same
 * text file mapping share the same amp.  amps are looked up in the
 * svntr_hashtab hash table with (vp, off, size, szc) as the key.  Text
 * replication segments are read-only MAP_PRIVATE|MAP_TEXT segments that
 * map a vnode.  Replication is achieved by forcing COW faults from the
 * vnode to the amp and mapping amp pages instead of vnode pages.  A
 * replication amp is assigned to a segment when the segment takes its
 * first pagefault.  To handle main thread lgroup rehoming,
 * segvn_trasync_thread periodically rechecks whether the process still
 * maps an amp local to the main thread.  If not, the async thread forces
 * the process to remap to an amp in the main thread's new home lgroup.
 * The current text replication implementation only benefits workloads that
 * do most of their work in the main thread of a process, or whose threads
 * all run in the same lgroup.  To extend the benefit to other types of
 * multithreaded workloads, further work would be needed in the hat layer
 * to allow the same virtual address in the same hat to simultaneously map
 * different physical addresses (i.e., page table replication would be
 * needed for x86).
 *
 * amp pages are used instead of vnode pages only as long as the segment
 * has a very simple life cycle: it is created via segvn_create(), handles
 * S_EXEC (S_READ) pagefaults and is then fully unmapped.  If anything more
 * complicated happens (the protection is changed, a real COW fault occurs,
 * the pagesize is changed, MC_LOCK is requested or the segment is
 * partially unmapped), we turn off text replication by converting the
 * segment back to a vnode-only segment (unmapping the segment's address
 * range and setting svd->amp to NULL).
 *
 * The original file can be changed after an amp is inserted into
 * svntr_hashtab.  Processes that are launched after the file has already
 * changed can't use the replicas created prior to the file change.  To
 * implement this, hash entries are timestamped: a replica can only be used
 * if the current file modification time is the same as the timestamp saved
 * when the hash entry was created.  However, timestamps alone are not
 * sufficient to detect file modification via mmap(MAP_SHARED) mappings, so
 * we deal with file changes via MAP_SHARED mappings differently.  When
 * writable MAP_SHARED mappings are created to vnodes marked as executable,
 * we mark all existing replicas for this vnode as not usable for future
 * text mappings, and we don't create new replicas for files that currently
 * have potentially writable MAP_SHARED mappings (i.e.,
 * vn_is_mapped(V_WRITE) is true).
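 *
 * As a concrete illustration (this simply paraphrases the eligibility test
 * computed as 'trok' in segvn_create() below; it is not an additional
 * interface): a mapping is considered for text replication only when it is
 * a MAP_PRIVATE, MAP_TEXT mapping of a regular file that needs no swap
 * reservation, lgroup optimizations are enabled, svntr_hashtab was set up
 * by segvn_init(), and the mapping is either larger than
 * textrepl_size_thresh or has _MAP_TEXTREPL set:
 *
 *	trok = (a->flags & MAP_TEXT) &&
 *	    (seg->s_size > textrepl_size_thresh ||
 *	    (a->flags & _MAP_TEXTREPL)) &&
 *	    lgrp_optimizations() && svntr_hashtab != NULL &&
 *	    a->type == MAP_PRIVATE && swresv == 0 &&
 *	    !(a->flags & MAP_NORESERVE) &&
 *	    seg->s_as != &kas && a->vp->v_type == VREG;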
350 */ 351 352 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 353 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 354 355 static ulong_t svntr_hashtab_sz = 512; 356 static svntr_bucket_t *svntr_hashtab = NULL; 357 static struct kmem_cache *svntr_cache; 358 static svntr_stats_t *segvn_textrepl_stats; 359 static ksema_t segvn_trasync_sem; 360 361 int segvn_disable_textrepl = 0; 362 size_t textrepl_size_thresh = (size_t)-1; 363 size_t segvn_textrepl_bytes = 0; 364 size_t segvn_textrepl_max_bytes = 0; 365 clock_t segvn_update_textrepl_interval = 0; 366 int segvn_update_tr_time = 10; 367 int segvn_disable_textrepl_update = 0; 368 369 static void segvn_textrepl(struct seg *); 370 static void segvn_textunrepl(struct seg *, int); 371 static void segvn_inval_trcache(vnode_t *); 372 static void segvn_trasync_thread(void); 373 static void segvn_trupdate_wakeup(void *); 374 static void segvn_trupdate(void); 375 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 376 ulong_t); 377 378 /* 379 * Initialize segvn data structures 380 */ 381 void 382 segvn_init(void) 383 { 384 uint_t maxszc; 385 uint_t szc; 386 size_t pgsz; 387 388 segvn_cache = kmem_cache_create("segvn_cache", 389 sizeof (struct segvn_data), 0, 390 segvn_cache_constructor, segvn_cache_destructor, NULL, 391 NULL, NULL, 0); 392 393 if (segvn_lpg_disable != 0) 394 return; 395 szc = maxszc = page_num_pagesizes() - 1; 396 if (szc == 0) { 397 segvn_lpg_disable = 1; 398 return; 399 } 400 if (page_get_pagesize(0) != PAGESIZE) { 401 panic("segvn_init: bad szc 0"); 402 /*NOTREACHED*/ 403 } 404 while (szc != 0) { 405 pgsz = page_get_pagesize(szc); 406 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 407 panic("segvn_init: bad szc %d", szc); 408 /*NOTREACHED*/ 409 } 410 szc--; 411 } 412 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 413 segvn_maxpgszc = maxszc; 414 415 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 416 segvn_use_regions = 0; 417 418 /* 419 * For now shared regions and text replication segvn support 420 * are mutually exclusive. This is acceptable because 421 * currently significant benefit from text replication was 422 * only observed on AMD64 NUMA platforms (due to relatively 423 * small L2$ size) and currently we don't support shared 424 * regions on x86. 
425 */ 426 if (segvn_use_regions && !segvn_disable_textrepl) { 427 segvn_disable_textrepl = 1; 428 } 429 430 #if defined(_LP64) 431 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 432 !segvn_disable_textrepl) { 433 ulong_t i; 434 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 435 436 svntr_cache = kmem_cache_create("svntr_cache", 437 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 438 NULL, NULL, NULL, 0); 439 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 440 for (i = 0; i < svntr_hashtab_sz; i++) { 441 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 442 MUTEX_DEFAULT, NULL); 443 } 444 segvn_textrepl_max_bytes = ptob(physmem) / 445 segvn_textrepl_max_bytes_factor; 446 segvn_textrepl_stats = kmem_zalloc(NCPU * 447 sizeof (svntr_stats_t), KM_SLEEP); 448 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 449 (void) thread_create(NULL, 0, segvn_trasync_thread, 450 NULL, 0, &p0, TS_RUN, minclsyspri); 451 } 452 #endif 453 } 454 455 #define SEGVN_PAGEIO ((void *)0x1) 456 #define SEGVN_NOPAGEIO ((void *)0x2) 457 458 static void 459 segvn_setvnode_mpss(vnode_t *vp) 460 { 461 int err; 462 463 ASSERT(vp->v_mpssdata == NULL || 464 vp->v_mpssdata == SEGVN_PAGEIO || 465 vp->v_mpssdata == SEGVN_NOPAGEIO); 466 467 if (vp->v_mpssdata == NULL) { 468 if (vn_vmpss_usepageio(vp)) { 469 err = VOP_PAGEIO(vp, (page_t *)NULL, 470 (u_offset_t)0, 0, 0, CRED()); 471 } else { 472 err = ENOSYS; 473 } 474 /* 475 * set v_mpssdata just once per vnode life 476 * so that it never changes. 477 */ 478 mutex_enter(&vp->v_lock); 479 if (vp->v_mpssdata == NULL) { 480 if (err == EINVAL) { 481 vp->v_mpssdata = SEGVN_PAGEIO; 482 } else { 483 vp->v_mpssdata = SEGVN_NOPAGEIO; 484 } 485 } 486 mutex_exit(&vp->v_lock); 487 } 488 } 489 490 int 491 segvn_create(struct seg *seg, void *argsp) 492 { 493 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 494 struct segvn_data *svd; 495 size_t swresv = 0; 496 struct cred *cred; 497 struct anon_map *amp; 498 int error = 0; 499 size_t pgsz; 500 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 501 int use_rgn = 0; 502 int trok = 0; 503 504 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 505 506 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 507 panic("segvn_create type"); 508 /*NOTREACHED*/ 509 } 510 511 /* 512 * Check arguments. If a shared anon structure is given then 513 * it is illegal to also specify a vp. 514 */ 515 if (a->amp != NULL && a->vp != NULL) { 516 panic("segvn_create anon_map"); 517 /*NOTREACHED*/ 518 } 519 520 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 521 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 522 segvn_use_regions) { 523 use_rgn = 1; 524 } 525 526 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */ 527 if (a->type == MAP_SHARED) 528 a->flags &= ~MAP_NORESERVE; 529 530 if (a->szc != 0) { 531 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 532 (a->amp != NULL && a->type == MAP_PRIVATE) || 533 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 534 a->szc = 0; 535 } else { 536 if (a->szc > segvn_maxpgszc) 537 a->szc = segvn_maxpgszc; 538 pgsz = page_get_pagesize(a->szc); 539 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 540 !IS_P2ALIGNED(seg->s_size, pgsz)) { 541 a->szc = 0; 542 } else if (a->vp != NULL) { 543 extern struct vnode kvp; 544 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 545 /* 546 * paranoid check. 547 * hat_page_demote() is not supported 548 * on swapfs pages. 
549 */ 550 a->szc = 0; 551 } else if (map_addr_vacalign_check(seg->s_base, 552 a->offset & PAGEMASK)) { 553 a->szc = 0; 554 } 555 } else if (a->amp != NULL) { 556 pgcnt_t anum = btopr(a->offset); 557 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 558 if (!IS_P2ALIGNED(anum, pgcnt)) { 559 a->szc = 0; 560 } 561 } 562 } 563 } 564 565 /* 566 * If segment may need private pages, reserve them now. 567 */ 568 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 569 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 570 if (anon_resv(seg->s_size) == 0) 571 return (EAGAIN); 572 swresv = seg->s_size; 573 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 574 seg, swresv, 1); 575 } 576 577 /* 578 * Reserve any mapping structures that may be required. 579 * 580 * Don't do it for segments that may use regions. It's currently a 581 * noop in the hat implementations anyway. 582 */ 583 if (!use_rgn) { 584 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 585 } 586 587 if (a->cred) { 588 cred = a->cred; 589 crhold(cred); 590 } else { 591 crhold(cred = CRED()); 592 } 593 594 /* Inform the vnode of the new mapping */ 595 if (a->vp != NULL) { 596 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 597 seg->s_as, seg->s_base, seg->s_size, a->prot, 598 a->maxprot, a->type, cred); 599 if (error) { 600 if (swresv != 0) { 601 anon_unresv(swresv); 602 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 603 "anon proc:%p %lu %u", 604 seg, swresv, 0); 605 } 606 crfree(cred); 607 if (!use_rgn) { 608 hat_unload(seg->s_as->a_hat, seg->s_base, 609 seg->s_size, HAT_UNLOAD_UNMAP); 610 } 611 return (error); 612 } 613 /* 614 * svntr_hashtab will be NULL if we support shared regions. 615 */ 616 trok = ((a->flags & MAP_TEXT) && 617 (seg->s_size > textrepl_size_thresh || 618 (a->flags & _MAP_TEXTREPL)) && 619 lgrp_optimizations() && svntr_hashtab != NULL && 620 a->type == MAP_PRIVATE && swresv == 0 && 621 !(a->flags & MAP_NORESERVE) && 622 seg->s_as != &kas && a->vp->v_type == VREG); 623 624 ASSERT(!trok || !use_rgn); 625 } 626 627 /* 628 * If more than one segment in the address space, and they're adjacent 629 * virtually, try to concatenate them. Don't concatenate if an 630 * explicit anon_map structure was supplied (e.g., SystemV shared 631 * memory) or if we'll use text replication for this segment. 632 */ 633 if (a->amp == NULL && !use_rgn && !trok) { 634 struct seg *pseg, *nseg; 635 struct segvn_data *psvd, *nsvd; 636 lgrp_mem_policy_t ppolicy, npolicy; 637 uint_t lgrp_mem_policy_flags = 0; 638 extern lgrp_mem_policy_t lgrp_mem_default_policy; 639 640 /* 641 * Memory policy flags (lgrp_mem_policy_flags) is valid when 642 * extending stack/heap segments. 643 */ 644 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 645 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 646 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 647 } else { 648 /* 649 * Get policy when not extending it from another segment 650 */ 651 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 652 } 653 654 /* 655 * First, try to concatenate the previous and new segments 656 */ 657 pseg = AS_SEGPREV(seg->s_as, seg); 658 if (pseg != NULL && 659 pseg->s_base + pseg->s_size == seg->s_base && 660 pseg->s_ops == &segvn_ops) { 661 /* 662 * Get memory allocation policy from previous segment. 663 * When extension is specified (e.g. for heap) apply 664 * this policy to the new segment regardless of the 665 * outcome of segment concatenation. 
Extension occurs 666 * for non-default policy otherwise default policy is 667 * used and is based on extended segment size. 668 */ 669 psvd = (struct segvn_data *)pseg->s_data; 670 ppolicy = psvd->policy_info.mem_policy; 671 if (lgrp_mem_policy_flags == 672 LGRP_MP_FLAG_EXTEND_UP) { 673 if (ppolicy != lgrp_mem_default_policy) { 674 mpolicy = ppolicy; 675 } else { 676 mpolicy = lgrp_mem_policy_default( 677 pseg->s_size + seg->s_size, 678 a->type); 679 } 680 } 681 682 if (mpolicy == ppolicy && 683 (pseg->s_size + seg->s_size <= 684 segvn_comb_thrshld || psvd->amp == NULL) && 685 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 686 /* 687 * success! now try to concatenate 688 * with following seg 689 */ 690 crfree(cred); 691 nseg = AS_SEGNEXT(pseg->s_as, pseg); 692 if (nseg != NULL && 693 nseg != pseg && 694 nseg->s_ops == &segvn_ops && 695 pseg->s_base + pseg->s_size == 696 nseg->s_base) 697 (void) segvn_concat(pseg, nseg, 0); 698 ASSERT(pseg->s_szc == 0 || 699 (a->szc == pseg->s_szc && 700 IS_P2ALIGNED(pseg->s_base, pgsz) && 701 IS_P2ALIGNED(pseg->s_size, pgsz))); 702 return (0); 703 } 704 } 705 706 /* 707 * Failed, so try to concatenate with following seg 708 */ 709 nseg = AS_SEGNEXT(seg->s_as, seg); 710 if (nseg != NULL && 711 seg->s_base + seg->s_size == nseg->s_base && 712 nseg->s_ops == &segvn_ops) { 713 /* 714 * Get memory allocation policy from next segment. 715 * When extension is specified (e.g. for stack) apply 716 * this policy to the new segment regardless of the 717 * outcome of segment concatenation. Extension occurs 718 * for non-default policy otherwise default policy is 719 * used and is based on extended segment size. 720 */ 721 nsvd = (struct segvn_data *)nseg->s_data; 722 npolicy = nsvd->policy_info.mem_policy; 723 if (lgrp_mem_policy_flags == 724 LGRP_MP_FLAG_EXTEND_DOWN) { 725 if (npolicy != lgrp_mem_default_policy) { 726 mpolicy = npolicy; 727 } else { 728 mpolicy = lgrp_mem_policy_default( 729 nseg->s_size + seg->s_size, 730 a->type); 731 } 732 } 733 734 if (mpolicy == npolicy && 735 segvn_extend_next(seg, nseg, a, swresv) == 0) { 736 crfree(cred); 737 ASSERT(nseg->s_szc == 0 || 738 (a->szc == nseg->s_szc && 739 IS_P2ALIGNED(nseg->s_base, pgsz) && 740 IS_P2ALIGNED(nseg->s_size, pgsz))); 741 return (0); 742 } 743 } 744 } 745 746 if (a->vp != NULL) { 747 VN_HOLD(a->vp); 748 if (a->type == MAP_SHARED) 749 lgrp_shm_policy_init(NULL, a->vp); 750 } 751 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 752 753 seg->s_ops = &segvn_ops; 754 seg->s_data = (void *)svd; 755 seg->s_szc = a->szc; 756 757 svd->seg = seg; 758 svd->vp = a->vp; 759 /* 760 * Anonymous mappings have no backing file so the offset is meaningless. 761 */ 762 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0; 763 svd->prot = a->prot; 764 svd->maxprot = a->maxprot; 765 svd->pageprot = 0; 766 svd->type = a->type; 767 svd->vpage = NULL; 768 svd->cred = cred; 769 svd->advice = MADV_NORMAL; 770 svd->pageadvice = 0; 771 svd->flags = (ushort_t)a->flags; 772 svd->softlockcnt = 0; 773 svd->rcookie = HAT_INVALID_REGION_COOKIE; 774 775 if (a->szc != 0 && a->vp != NULL) { 776 segvn_setvnode_mpss(a->vp); 777 } 778 if (svd->type == MAP_SHARED && svd->vp != NULL && 779 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 780 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 781 segvn_inval_trcache(svd->vp); 782 } 783 784 amp = a->amp; 785 if ((svd->amp = amp) == NULL) { 786 svd->anon_index = 0; 787 if (svd->type == MAP_SHARED) { 788 svd->swresv = 0; 789 /* 790 * Shared mappings to a vp need no other setup. 
791 * If we have a shared mapping to an anon_map object 792 * which hasn't been allocated yet, allocate the 793 * struct now so that it will be properly shared 794 * by remembering the swap reservation there. 795 */ 796 if (a->vp == NULL) { 797 svd->amp = anonmap_alloc(seg->s_size, swresv, 798 ANON_SLEEP); 799 svd->amp->a_szc = seg->s_szc; 800 } 801 } else { 802 /* 803 * Private mapping (with or without a vp). 804 * Allocate anon_map when needed. 805 */ 806 svd->swresv = swresv; 807 } 808 } else { 809 pgcnt_t anon_num; 810 811 /* 812 * Mapping to an existing anon_map structure without a vp. 813 * For now we will insure that the segment size isn't larger 814 * than the size - offset gives us. Later on we may wish to 815 * have the anon array dynamically allocated itself so that 816 * we don't always have to allocate all the anon pointer slots. 817 * This of course involves adding extra code to check that we 818 * aren't trying to use an anon pointer slot beyond the end 819 * of the currently allocated anon array. 820 */ 821 if ((amp->size - a->offset) < seg->s_size) { 822 panic("segvn_create anon_map size"); 823 /*NOTREACHED*/ 824 } 825 826 anon_num = btopr(a->offset); 827 828 if (a->type == MAP_SHARED) { 829 /* 830 * SHARED mapping to a given anon_map. 831 */ 832 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 833 amp->refcnt++; 834 if (a->szc > amp->a_szc) { 835 amp->a_szc = a->szc; 836 } 837 ANON_LOCK_EXIT(&->a_rwlock); 838 svd->anon_index = anon_num; 839 svd->swresv = 0; 840 } else { 841 /* 842 * PRIVATE mapping to a given anon_map. 843 * Make sure that all the needed anon 844 * structures are created (so that we will 845 * share the underlying pages if nothing 846 * is written by this mapping) and then 847 * duplicate the anon array as is done 848 * when a privately mapped segment is dup'ed. 849 */ 850 struct anon *ap; 851 caddr_t addr; 852 caddr_t eaddr; 853 ulong_t anon_idx; 854 int hat_flag = HAT_LOAD; 855 856 if (svd->flags & MAP_TEXT) { 857 hat_flag |= HAT_LOAD_TEXT; 858 } 859 860 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 861 svd->amp->a_szc = seg->s_szc; 862 svd->anon_index = 0; 863 svd->swresv = swresv; 864 865 /* 866 * Prevent 2 threads from allocating anon 867 * slots simultaneously. 868 */ 869 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 870 eaddr = seg->s_base + seg->s_size; 871 872 for (anon_idx = anon_num, addr = seg->s_base; 873 addr < eaddr; addr += PAGESIZE, anon_idx++) { 874 page_t *pp; 875 876 if ((ap = anon_get_ptr(amp->ahp, 877 anon_idx)) != NULL) 878 continue; 879 880 /* 881 * Allocate the anon struct now. 882 * Might as well load up translation 883 * to the page while we're at it... 884 */ 885 pp = anon_zero(seg, addr, &ap, cred); 886 if (ap == NULL || pp == NULL) { 887 panic("segvn_create anon_zero"); 888 /*NOTREACHED*/ 889 } 890 891 /* 892 * Re-acquire the anon_map lock and 893 * initialize the anon array entry. 
894 */ 895 ASSERT(anon_get_ptr(amp->ahp, 896 anon_idx) == NULL); 897 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 898 ANON_SLEEP); 899 900 ASSERT(seg->s_szc == 0); 901 ASSERT(!IS_VMODSORT(pp->p_vnode)); 902 903 ASSERT(use_rgn == 0); 904 hat_memload(seg->s_as->a_hat, addr, pp, 905 svd->prot & ~PROT_WRITE, hat_flag); 906 907 page_unlock(pp); 908 } 909 ASSERT(seg->s_szc == 0); 910 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 911 0, seg->s_size); 912 ANON_LOCK_EXIT(&->a_rwlock); 913 } 914 } 915 916 /* 917 * Set default memory allocation policy for segment 918 * 919 * Always set policy for private memory at least for initialization 920 * even if this is a shared memory segment 921 */ 922 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 923 924 if (svd->type == MAP_SHARED) 925 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 926 svd->vp, svd->offset, seg->s_size); 927 928 if (use_rgn) { 929 ASSERT(!trok); 930 ASSERT(svd->amp == NULL); 931 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 932 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 933 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 934 HAT_REGION_TEXT); 935 } 936 937 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 938 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 939 940 return (0); 941 } 942 943 /* 944 * Concatenate two existing segments, if possible. 945 * Return 0 on success, -1 if two segments are not compatible 946 * or -2 on memory allocation failure. 947 * If amp_cat == 1 then try and concat segments with anon maps 948 */ 949 static int 950 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 951 { 952 struct segvn_data *svd1 = seg1->s_data; 953 struct segvn_data *svd2 = seg2->s_data; 954 struct anon_map *amp1 = svd1->amp; 955 struct anon_map *amp2 = svd2->amp; 956 struct vpage *vpage1 = svd1->vpage; 957 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 958 size_t size, nvpsize; 959 pgcnt_t npages1, npages2; 960 961 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 962 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 963 ASSERT(seg1->s_ops == seg2->s_ops); 964 965 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 966 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 967 return (-1); 968 } 969 970 /* both segments exist, try to merge them */ 971 #define incompat(x) (svd1->x != svd2->x) 972 if (incompat(vp) || incompat(maxprot) || 973 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 974 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 975 incompat(type) || incompat(cred) || incompat(flags) || 976 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 977 (svd2->softlockcnt > 0)) 978 return (-1); 979 #undef incompat 980 981 /* 982 * vp == NULL implies zfod, offset doesn't matter 983 */ 984 if (svd1->vp != NULL && 985 svd1->offset + seg1->s_size != svd2->offset) { 986 return (-1); 987 } 988 989 /* 990 * Don't concatenate if either segment uses text replication. 991 */ 992 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 993 return (-1); 994 } 995 996 /* 997 * Fail early if we're not supposed to concatenate 998 * segments with non NULL amp. 
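	 *
	 * (For example, the automatic concatenation attempted from
	 * segvn_create() passes amp_cat == 0; see the
	 * segvn_concat(pseg, nseg, 0) call there.)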
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			for (vp = nvpage; vp < nvpage + npages1; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			for (vp = nvpage + npages1;
			    vp < nvpage + npages1 + npages2; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}
	}

	/*
	 * If either segment has private pages, create a new merged anon
	 * array.  If merging shared anon segments, just decrement the anon
	 * map's refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
1079 */ 1080 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1081 ASSERT(amp1->refcnt == 1); 1082 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1083 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1084 anon_release(nahp, btop(asize)); 1085 ANON_LOCK_EXIT(&1->a_rwlock); 1086 if (nvpage != NULL) { 1087 kmem_free(nvpage, nvpsize); 1088 } 1089 return (-2); 1090 } 1091 } 1092 if (amp2 != NULL) { 1093 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1094 ASSERT(amp2->refcnt == 1); 1095 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1096 nahp, btop(seg1->s_size), btop(seg2->s_size), 1097 ANON_NOSLEEP)) { 1098 anon_release(nahp, btop(asize)); 1099 ANON_LOCK_EXIT(&2->a_rwlock); 1100 if (amp1 != NULL) { 1101 ANON_LOCK_EXIT(&1->a_rwlock); 1102 } 1103 if (nvpage != NULL) { 1104 kmem_free(nvpage, nvpsize); 1105 } 1106 return (-2); 1107 } 1108 } 1109 if (amp1 != NULL) { 1110 namp = amp1; 1111 anon_release(amp1->ahp, btop(amp1->size)); 1112 } 1113 if (amp2 != NULL) { 1114 if (namp == NULL) { 1115 ASSERT(amp1 == NULL); 1116 namp = amp2; 1117 anon_release(amp2->ahp, btop(amp2->size)); 1118 } else { 1119 amp2->refcnt--; 1120 ANON_LOCK_EXIT(&2->a_rwlock); 1121 anonmap_free(amp2); 1122 } 1123 svd2->amp = NULL; /* needed for seg_free */ 1124 } 1125 namp->ahp = nahp; 1126 namp->size = asize; 1127 svd1->amp = namp; 1128 svd1->anon_index = 0; 1129 ANON_LOCK_EXIT(&namp->a_rwlock); 1130 } 1131 /* 1132 * Now free the old vpage structures. 1133 */ 1134 if (nvpage != NULL) { 1135 if (vpage1 != NULL) { 1136 kmem_free(vpage1, vpgtob(npages1)); 1137 } 1138 if (vpage2 != NULL) { 1139 svd2->vpage = NULL; 1140 kmem_free(vpage2, vpgtob(npages2)); 1141 } 1142 if (svd2->pageprot) { 1143 svd1->pageprot = 1; 1144 } 1145 if (svd2->pageadvice) { 1146 svd1->pageadvice = 1; 1147 } 1148 svd1->vpage = nvpage; 1149 } 1150 1151 /* all looks ok, merge segments */ 1152 svd1->swresv += svd2->swresv; 1153 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1154 size = seg2->s_size; 1155 seg_free(seg2); 1156 seg1->s_size += size; 1157 return (0); 1158 } 1159 1160 /* 1161 * Extend the previous segment (seg1) to include the 1162 * new segment (seg2 + a), if possible. 1163 * Return 0 on success. 1164 */ 1165 static int 1166 segvn_extend_prev(seg1, seg2, a, swresv) 1167 struct seg *seg1, *seg2; 1168 struct segvn_crargs *a; 1169 size_t swresv; 1170 { 1171 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1172 size_t size; 1173 struct anon_map *amp1; 1174 struct vpage *new_vpage; 1175 1176 /* 1177 * We don't need any segment level locks for "segvn" data 1178 * since the address space is "write" locked. 1179 */ 1180 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1181 1182 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1183 return (-1); 1184 } 1185 1186 /* second segment is new, try to extend first */ 1187 /* XXX - should also check cred */ 1188 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1189 (!svd1->pageprot && (svd1->prot != a->prot)) || 1190 svd1->type != a->type || svd1->flags != a->flags || 1191 seg1->s_szc != a->szc) 1192 return (-1); 1193 1194 /* vp == NULL implies zfod, offset doesn't matter */ 1195 if (svd1->vp != NULL && 1196 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1197 return (-1); 1198 1199 if (svd1->tr_state != SEGVN_TR_OFF) { 1200 return (-1); 1201 } 1202 1203 amp1 = svd1->amp; 1204 if (amp1) { 1205 pgcnt_t newpgs; 1206 1207 /* 1208 * Segment has private pages, can data structures 1209 * be expanded? 
1210 * 1211 * Acquire the anon_map lock to prevent it from changing, 1212 * if it is shared. This ensures that the anon_map 1213 * will not change while a thread which has a read/write 1214 * lock on an address space references it. 1215 * XXX - Don't need the anon_map lock at all if "refcnt" 1216 * is 1. 1217 * 1218 * Can't grow a MAP_SHARED segment with an anonmap because 1219 * there may be existing anon slots where we want to extend 1220 * the segment and we wouldn't know what to do with them 1221 * (e.g., for tmpfs right thing is to just leave them there, 1222 * for /dev/zero they should be cleared out). 1223 */ 1224 if (svd1->type == MAP_SHARED) 1225 return (-1); 1226 1227 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1228 if (amp1->refcnt > 1) { 1229 ANON_LOCK_EXIT(&1->a_rwlock); 1230 return (-1); 1231 } 1232 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1233 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1234 1235 if (newpgs == 0) { 1236 ANON_LOCK_EXIT(&1->a_rwlock); 1237 return (-1); 1238 } 1239 amp1->size = ptob(newpgs); 1240 ANON_LOCK_EXIT(&1->a_rwlock); 1241 } 1242 if (svd1->vpage != NULL) { 1243 struct vpage *vp, *evp; 1244 new_vpage = 1245 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1246 KM_NOSLEEP); 1247 if (new_vpage == NULL) 1248 return (-1); 1249 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1250 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1251 svd1->vpage = new_vpage; 1252 1253 vp = new_vpage + seg_pages(seg1); 1254 evp = vp + seg_pages(seg2); 1255 for (; vp < evp; vp++) 1256 VPP_SETPROT(vp, a->prot); 1257 } 1258 size = seg2->s_size; 1259 seg_free(seg2); 1260 seg1->s_size += size; 1261 svd1->swresv += swresv; 1262 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1263 svd1->type == MAP_SHARED && svd1->vp != NULL && 1264 (svd1->vp->v_flag & VVMEXEC)) { 1265 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1266 segvn_inval_trcache(svd1->vp); 1267 } 1268 return (0); 1269 } 1270 1271 /* 1272 * Extend the next segment (seg2) to include the 1273 * new segment (seg1 + a), if possible. 1274 * Return 0 on success. 1275 */ 1276 static int 1277 segvn_extend_next( 1278 struct seg *seg1, 1279 struct seg *seg2, 1280 struct segvn_crargs *a, 1281 size_t swresv) 1282 { 1283 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1284 size_t size; 1285 struct anon_map *amp2; 1286 struct vpage *new_vpage; 1287 1288 /* 1289 * We don't need any segment level locks for "segvn" data 1290 * since the address space is "write" locked. 1291 */ 1292 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1293 1294 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1295 return (-1); 1296 } 1297 1298 /* first segment is new, try to extend second */ 1299 /* XXX - should also check cred */ 1300 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1301 (!svd2->pageprot && (svd2->prot != a->prot)) || 1302 svd2->type != a->type || svd2->flags != a->flags || 1303 seg2->s_szc != a->szc) 1304 return (-1); 1305 /* vp == NULL implies zfod, offset doesn't matter */ 1306 if (svd2->vp != NULL && 1307 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1308 return (-1); 1309 1310 if (svd2->tr_state != SEGVN_TR_OFF) { 1311 return (-1); 1312 } 1313 1314 amp2 = svd2->amp; 1315 if (amp2) { 1316 pgcnt_t newpgs; 1317 1318 /* 1319 * Segment has private pages, can data structures 1320 * be expanded? 1321 * 1322 * Acquire the anon_map lock to prevent it from changing, 1323 * if it is shared. 
This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
	}
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}

static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	uint_t prot;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * If the segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated.  This semantic prevents the child or
	 * parent from dying during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
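	 *
	 * Illustrative example (numbers invented for clarity): if the parent
	 * has a 64K MAP_NORESERVE private segment in which only three anon
	 * slots were ever allocated, svd->swresv is 3 * PAGESIZE, so the
	 * child reserves just those three pages here rather than the full
	 * 64K.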
1405 */ 1406 if ((len = svd->swresv) != 0) { 1407 if (anon_resv(svd->swresv) == 0) 1408 return (ENOMEM); 1409 1410 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1411 seg, len, 0); 1412 } 1413 1414 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1415 1416 newseg->s_ops = &segvn_ops; 1417 newseg->s_data = (void *)newsvd; 1418 newseg->s_szc = seg->s_szc; 1419 1420 newsvd->seg = newseg; 1421 if ((newsvd->vp = svd->vp) != NULL) { 1422 VN_HOLD(svd->vp); 1423 if (svd->type == MAP_SHARED) 1424 lgrp_shm_policy_init(NULL, svd->vp); 1425 } 1426 newsvd->offset = svd->offset; 1427 newsvd->prot = svd->prot; 1428 newsvd->maxprot = svd->maxprot; 1429 newsvd->pageprot = svd->pageprot; 1430 newsvd->type = svd->type; 1431 newsvd->cred = svd->cred; 1432 crhold(newsvd->cred); 1433 newsvd->advice = svd->advice; 1434 newsvd->pageadvice = svd->pageadvice; 1435 newsvd->swresv = svd->swresv; 1436 newsvd->flags = svd->flags; 1437 newsvd->softlockcnt = 0; 1438 newsvd->policy_info = svd->policy_info; 1439 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1440 1441 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1442 /* 1443 * Not attaching to a shared anon object. 1444 */ 1445 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1446 svd->tr_state == SEGVN_TR_OFF); 1447 if (svd->tr_state == SEGVN_TR_ON) { 1448 ASSERT(newsvd->vp != NULL && amp != NULL); 1449 newsvd->tr_state = SEGVN_TR_INIT; 1450 } else { 1451 newsvd->tr_state = svd->tr_state; 1452 } 1453 newsvd->amp = NULL; 1454 newsvd->anon_index = 0; 1455 } else { 1456 /* regions for now are only used on pure vnode segments */ 1457 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1458 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1459 newsvd->tr_state = SEGVN_TR_OFF; 1460 if (svd->type == MAP_SHARED) { 1461 newsvd->amp = amp; 1462 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1463 amp->refcnt++; 1464 ANON_LOCK_EXIT(&->a_rwlock); 1465 newsvd->anon_index = svd->anon_index; 1466 } else { 1467 int reclaim = 1; 1468 1469 /* 1470 * Allocate and initialize new anon_map structure. 1471 */ 1472 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1473 ANON_SLEEP); 1474 newsvd->amp->a_szc = newseg->s_szc; 1475 newsvd->anon_index = 0; 1476 1477 /* 1478 * We don't have to acquire the anon_map lock 1479 * for the new segment (since it belongs to an 1480 * address space that is still not associated 1481 * with any process), or the segment in the old 1482 * address space (since all threads in it 1483 * are stopped while duplicating the address space). 1484 */ 1485 1486 /* 1487 * The goal of the following code is to make sure that 1488 * softlocked pages do not end up as copy on write 1489 * pages. This would cause problems where one 1490 * thread writes to a page that is COW and a different 1491 * thread in the same process has softlocked it. The 1492 * softlock lock would move away from this process 1493 * because the write would cause this process to get 1494 * a copy (without the softlock). 1495 * 1496 * The strategy here is to just break the 1497 * sharing on pages that could possibly be 1498 * softlocked. 1499 */ 1500 retry: 1501 if (svd->softlockcnt) { 1502 struct anon *ap, *newap; 1503 size_t i; 1504 uint_t vpprot; 1505 page_t *anon_pl[1+1], *pp; 1506 caddr_t addr; 1507 ulong_t old_idx = svd->anon_index; 1508 ulong_t new_idx = 0; 1509 1510 /* 1511 * The softlock count might be non zero 1512 * because some pages are still stuck in the 1513 * cache for lazy reclaim. Flush the cache 1514 * now. This should drop the count to zero. 
1515 * [or there is really I/O going on to these 1516 * pages]. Note, we have the writers lock so 1517 * nothing gets inserted during the flush. 1518 */ 1519 if (reclaim == 1) { 1520 segvn_purge(seg); 1521 reclaim = 0; 1522 goto retry; 1523 } 1524 i = btopr(seg->s_size); 1525 addr = seg->s_base; 1526 /* 1527 * XXX break cow sharing using PAGESIZE 1528 * pages. They will be relocated into larger 1529 * pages at fault time. 1530 */ 1531 while (i-- > 0) { 1532 if (ap = anon_get_ptr(amp->ahp, 1533 old_idx)) { 1534 error = anon_getpage(&ap, 1535 &vpprot, anon_pl, PAGESIZE, 1536 seg, addr, S_READ, 1537 svd->cred); 1538 if (error) { 1539 newsvd->vpage = NULL; 1540 goto out; 1541 } 1542 /* 1543 * prot need not be computed 1544 * below 'cause anon_private is 1545 * going to ignore it anyway 1546 * as child doesn't inherit 1547 * pagelock from parent. 1548 */ 1549 prot = svd->pageprot ? 1550 VPP_PROT( 1551 &svd->vpage[ 1552 seg_page(seg, addr)]) 1553 : svd->prot; 1554 pp = anon_private(&newap, 1555 newseg, addr, prot, 1556 anon_pl[0], 0, 1557 newsvd->cred); 1558 if (pp == NULL) { 1559 /* no mem abort */ 1560 newsvd->vpage = NULL; 1561 error = ENOMEM; 1562 goto out; 1563 } 1564 (void) anon_set_ptr( 1565 newsvd->amp->ahp, new_idx, 1566 newap, ANON_SLEEP); 1567 page_unlock(pp); 1568 } 1569 addr += PAGESIZE; 1570 old_idx++; 1571 new_idx++; 1572 } 1573 } else { /* common case */ 1574 if (seg->s_szc != 0) { 1575 /* 1576 * If at least one of anon slots of a 1577 * large page exists then make sure 1578 * all anon slots of a large page 1579 * exist to avoid partial cow sharing 1580 * of a large page in the future. 1581 */ 1582 anon_dup_fill_holes(amp->ahp, 1583 svd->anon_index, newsvd->amp->ahp, 1584 0, seg->s_size, seg->s_szc, 1585 svd->vp != NULL); 1586 } else { 1587 anon_dup(amp->ahp, svd->anon_index, 1588 newsvd->amp->ahp, 0, seg->s_size); 1589 } 1590 1591 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1592 seg->s_size, PROT_WRITE); 1593 } 1594 } 1595 } 1596 /* 1597 * If necessary, create a vpage structure for the new segment. 1598 * Do not copy any page lock indications. 
1599 */ 1600 if (svd->vpage != NULL) { 1601 uint_t i; 1602 struct vpage *ovp = svd->vpage; 1603 struct vpage *nvp; 1604 1605 nvp = newsvd->vpage = 1606 kmem_alloc(vpgtob(npages), KM_SLEEP); 1607 for (i = 0; i < npages; i++) { 1608 *nvp = *ovp++; 1609 VPP_CLRPPLOCK(nvp++); 1610 } 1611 } else 1612 newsvd->vpage = NULL; 1613 1614 /* Inform the vnode of the new mapping */ 1615 if (newsvd->vp != NULL) { 1616 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1617 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1618 newsvd->maxprot, newsvd->type, newsvd->cred); 1619 } 1620 out: 1621 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1622 ASSERT(newsvd->amp == NULL); 1623 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1624 newsvd->rcookie = svd->rcookie; 1625 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1626 } 1627 return (error); 1628 } 1629 1630 1631 /* 1632 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1633 * those pages actually processed by the HAT 1634 */ 1635 extern int free_pages; 1636 1637 static void 1638 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1639 size_t r_size, void *r_obj, u_offset_t r_objoff) 1640 { 1641 u_offset_t off; 1642 size_t len; 1643 vnode_t *vp = (vnode_t *)r_obj; 1644 1645 ASSERT(eaddr > saddr); 1646 ASSERT(saddr >= r_saddr); 1647 ASSERT(saddr < r_saddr + r_size); 1648 ASSERT(eaddr > r_saddr); 1649 ASSERT(eaddr <= r_saddr + r_size); 1650 ASSERT(vp != NULL); 1651 1652 if (!free_pages) { 1653 return; 1654 } 1655 1656 len = eaddr - saddr; 1657 off = (saddr - r_saddr) + r_objoff; 1658 free_vp_pages(vp, off, len); 1659 } 1660 1661 static void 1662 segvn_hat_unload_callback(hat_callback_t *cb) 1663 { 1664 struct seg *seg = cb->hcb_data; 1665 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1666 size_t len; 1667 u_offset_t off; 1668 1669 ASSERT(svd->vp != NULL); 1670 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1671 ASSERT(cb->hcb_start_addr >= seg->s_base); 1672 1673 len = cb->hcb_end_addr - cb->hcb_start_addr; 1674 off = cb->hcb_start_addr - seg->s_base; 1675 free_vp_pages(svd->vp, svd->offset + off, len); 1676 } 1677 1678 static int 1679 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1680 { 1681 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1682 struct segvn_data *nsvd; 1683 struct seg *nseg; 1684 struct anon_map *amp; 1685 pgcnt_t opages; /* old segment size in pages */ 1686 pgcnt_t npages; /* new segment size in pages */ 1687 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1688 hat_callback_t callback; /* used for free_vp_pages() */ 1689 hat_callback_t *cbp = NULL; 1690 caddr_t nbase; 1691 size_t nsize; 1692 size_t oswresv; 1693 int reclaim = 1; 1694 1695 /* 1696 * We don't need any segment level locks for "segvn" data 1697 * since the address space is "write" locked. 1698 */ 1699 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1700 1701 /* 1702 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1703 * softlockcnt is protected from change by the as write lock. 1704 */ 1705 retry: 1706 if (svd->softlockcnt > 0) { 1707 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1708 /* 1709 * since we do have the writers lock nobody can fill 1710 * the cache during the purge. The flush either succeeds 1711 * or we still have pending I/Os. 
1712 */ 1713 if (reclaim == 1) { 1714 segvn_purge(seg); 1715 reclaim = 0; 1716 goto retry; 1717 } 1718 return (EAGAIN); 1719 } 1720 1721 /* 1722 * Check for bad sizes 1723 */ 1724 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1725 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1726 panic("segvn_unmap"); 1727 /*NOTREACHED*/ 1728 } 1729 1730 if (seg->s_szc != 0) { 1731 size_t pgsz = page_get_pagesize(seg->s_szc); 1732 int err; 1733 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1734 ASSERT(seg->s_base != addr || seg->s_size != len); 1735 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1736 ASSERT(svd->amp == NULL); 1737 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1738 hat_leave_region(seg->s_as->a_hat, 1739 svd->rcookie, HAT_REGION_TEXT); 1740 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1741 /* 1742 * could pass a flag to segvn_demote_range() 1743 * below to tell it not to do any unloads but 1744 * this case is rare enough to not bother for 1745 * now. 1746 */ 1747 } else if (svd->tr_state == SEGVN_TR_INIT) { 1748 svd->tr_state = SEGVN_TR_OFF; 1749 } else if (svd->tr_state == SEGVN_TR_ON) { 1750 ASSERT(svd->amp != NULL); 1751 segvn_textunrepl(seg, 1); 1752 ASSERT(svd->amp == NULL); 1753 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1754 } 1755 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1756 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1757 if (err == 0) { 1758 return (IE_RETRY); 1759 } 1760 return (err); 1761 } 1762 } 1763 1764 /* Inform the vnode of the unmapping. */ 1765 if (svd->vp) { 1766 int error; 1767 1768 error = VOP_DELMAP(svd->vp, 1769 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1770 seg->s_as, addr, len, svd->prot, svd->maxprot, 1771 svd->type, svd->cred); 1772 1773 if (error == EAGAIN) 1774 return (error); 1775 } 1776 1777 /* 1778 * Remove any page locks set through this mapping. 1779 * If text replication is not off no page locks could have been 1780 * established via this mapping. 1781 */ 1782 if (svd->tr_state == SEGVN_TR_OFF) { 1783 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1784 } 1785 1786 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1787 ASSERT(svd->amp == NULL); 1788 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1789 ASSERT(svd->type == MAP_PRIVATE); 1790 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1791 HAT_REGION_TEXT); 1792 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1793 } else if (svd->tr_state == SEGVN_TR_ON) { 1794 ASSERT(svd->amp != NULL); 1795 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1796 segvn_textunrepl(seg, 1); 1797 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1798 } else { 1799 if (svd->tr_state != SEGVN_TR_OFF) { 1800 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1801 svd->tr_state = SEGVN_TR_OFF; 1802 } 1803 /* 1804 * Unload any hardware translations in the range to be taken 1805 * out. Use a callback to invoke free_vp_pages() effectively. 
1806 */ 1807 if (svd->vp != NULL && free_pages != 0) { 1808 callback.hcb_data = seg; 1809 callback.hcb_function = segvn_hat_unload_callback; 1810 cbp = &callback; 1811 } 1812 hat_unload_callback(seg->s_as->a_hat, addr, len, 1813 HAT_UNLOAD_UNMAP, cbp); 1814 1815 if (svd->type == MAP_SHARED && svd->vp != NULL && 1816 (svd->vp->v_flag & VVMEXEC) && 1817 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1818 segvn_inval_trcache(svd->vp); 1819 } 1820 } 1821 1822 /* 1823 * Check for entire segment 1824 */ 1825 if (addr == seg->s_base && len == seg->s_size) { 1826 seg_free(seg); 1827 return (0); 1828 } 1829 1830 opages = seg_pages(seg); 1831 dpages = btop(len); 1832 npages = opages - dpages; 1833 amp = svd->amp; 1834 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1835 1836 /* 1837 * Check for beginning of segment 1838 */ 1839 if (addr == seg->s_base) { 1840 if (svd->vpage != NULL) { 1841 size_t nbytes; 1842 struct vpage *ovpage; 1843 1844 ovpage = svd->vpage; /* keep pointer to vpage */ 1845 1846 nbytes = vpgtob(npages); 1847 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1848 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1849 1850 /* free up old vpage */ 1851 kmem_free(ovpage, vpgtob(opages)); 1852 } 1853 if (amp != NULL) { 1854 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1855 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1856 /* 1857 * Free up now unused parts of anon_map array. 1858 */ 1859 if (amp->a_szc == seg->s_szc) { 1860 if (seg->s_szc != 0) { 1861 anon_free_pages(amp->ahp, 1862 svd->anon_index, len, 1863 seg->s_szc); 1864 } else { 1865 anon_free(amp->ahp, 1866 svd->anon_index, 1867 len); 1868 } 1869 } else { 1870 ASSERT(svd->type == MAP_SHARED); 1871 ASSERT(amp->a_szc > seg->s_szc); 1872 anon_shmap_free_pages(amp, 1873 svd->anon_index, len); 1874 } 1875 1876 /* 1877 * Unreserve swap space for the 1878 * unmapped chunk of this segment in 1879 * case it's MAP_SHARED 1880 */ 1881 if (svd->type == MAP_SHARED) { 1882 anon_unresv(len); 1883 amp->swresv -= len; 1884 } 1885 } 1886 ANON_LOCK_EXIT(&->a_rwlock); 1887 svd->anon_index += dpages; 1888 } 1889 if (svd->vp != NULL) 1890 svd->offset += len; 1891 1892 if (svd->swresv) { 1893 if (svd->flags & MAP_NORESERVE) { 1894 ASSERT(amp); 1895 oswresv = svd->swresv; 1896 1897 svd->swresv = ptob(anon_pages(amp->ahp, 1898 svd->anon_index, npages)); 1899 anon_unresv(oswresv - svd->swresv); 1900 } else { 1901 anon_unresv(len); 1902 svd->swresv -= len; 1903 } 1904 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1905 seg, len, 0); 1906 } 1907 1908 seg->s_base += len; 1909 seg->s_size -= len; 1910 return (0); 1911 } 1912 1913 /* 1914 * Check for end of segment 1915 */ 1916 if (addr + len == seg->s_base + seg->s_size) { 1917 if (svd->vpage != NULL) { 1918 size_t nbytes; 1919 struct vpage *ovpage; 1920 1921 ovpage = svd->vpage; /* keep pointer to vpage */ 1922 1923 nbytes = vpgtob(npages); 1924 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1925 bcopy(ovpage, svd->vpage, nbytes); 1926 1927 /* free up old vpage */ 1928 kmem_free(ovpage, vpgtob(opages)); 1929 1930 } 1931 if (amp != NULL) { 1932 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1933 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1934 /* 1935 * Free up now unused parts of anon_map array. 
1936 */ 1937 ulong_t an_idx = svd->anon_index + npages; 1938 if (amp->a_szc == seg->s_szc) { 1939 if (seg->s_szc != 0) { 1940 anon_free_pages(amp->ahp, 1941 an_idx, len, 1942 seg->s_szc); 1943 } else { 1944 anon_free(amp->ahp, an_idx, 1945 len); 1946 } 1947 } else { 1948 ASSERT(svd->type == MAP_SHARED); 1949 ASSERT(amp->a_szc > seg->s_szc); 1950 anon_shmap_free_pages(amp, 1951 an_idx, len); 1952 } 1953 1954 /* 1955 * Unreserve swap space for the 1956 * unmapped chunk of this segment in 1957 * case it's MAP_SHARED 1958 */ 1959 if (svd->type == MAP_SHARED) { 1960 anon_unresv(len); 1961 amp->swresv -= len; 1962 } 1963 } 1964 ANON_LOCK_EXIT(&->a_rwlock); 1965 } 1966 1967 if (svd->swresv) { 1968 if (svd->flags & MAP_NORESERVE) { 1969 ASSERT(amp); 1970 oswresv = svd->swresv; 1971 svd->swresv = ptob(anon_pages(amp->ahp, 1972 svd->anon_index, npages)); 1973 anon_unresv(oswresv - svd->swresv); 1974 } else { 1975 anon_unresv(len); 1976 svd->swresv -= len; 1977 } 1978 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 1979 "anon proc:%p %lu %u", seg, len, 0); 1980 } 1981 1982 seg->s_size -= len; 1983 return (0); 1984 } 1985 1986 /* 1987 * The section to go is in the middle of the segment, 1988 * have to make it into two segments. nseg is made for 1989 * the high end while seg is cut down at the low end. 1990 */ 1991 nbase = addr + len; /* new seg base */ 1992 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 1993 seg->s_size = addr - seg->s_base; /* shrink old seg */ 1994 nseg = seg_alloc(seg->s_as, nbase, nsize); 1995 if (nseg == NULL) { 1996 panic("segvn_unmap seg_alloc"); 1997 /*NOTREACHED*/ 1998 } 1999 nseg->s_ops = seg->s_ops; 2000 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2001 nseg->s_data = (void *)nsvd; 2002 nseg->s_szc = seg->s_szc; 2003 *nsvd = *svd; 2004 nsvd->seg = nseg; 2005 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2006 nsvd->swresv = 0; 2007 nsvd->softlockcnt = 0; 2008 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2009 2010 if (svd->vp != NULL) { 2011 VN_HOLD(nsvd->vp); 2012 if (nsvd->type == MAP_SHARED) 2013 lgrp_shm_policy_init(NULL, nsvd->vp); 2014 } 2015 crhold(svd->cred); 2016 2017 if (svd->vpage == NULL) { 2018 nsvd->vpage = NULL; 2019 } else { 2020 /* need to split vpage into two arrays */ 2021 size_t nbytes; 2022 struct vpage *ovpage; 2023 2024 ovpage = svd->vpage; /* keep pointer to vpage */ 2025 2026 npages = seg_pages(seg); /* seg has shrunk */ 2027 nbytes = vpgtob(npages); 2028 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2029 2030 bcopy(ovpage, svd->vpage, nbytes); 2031 2032 npages = seg_pages(nseg); 2033 nbytes = vpgtob(npages); 2034 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2035 2036 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2037 2038 /* free up old vpage */ 2039 kmem_free(ovpage, vpgtob(opages)); 2040 } 2041 2042 if (amp == NULL) { 2043 nsvd->amp = NULL; 2044 nsvd->anon_index = 0; 2045 } else { 2046 /* 2047 * Need to create a new anon map for the new segment. 2048 * We'll also allocate a new smaller array for the old 2049 * smaller segment to save space. 2050 */ 2051 opages = btop((uintptr_t)(addr - seg->s_base)); 2052 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2053 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2054 /* 2055 * Free up now unused parts of anon_map array. 
2056 */ 2057 ulong_t an_idx = svd->anon_index + opages; 2058 if (amp->a_szc == seg->s_szc) { 2059 if (seg->s_szc != 0) { 2060 anon_free_pages(amp->ahp, an_idx, len, 2061 seg->s_szc); 2062 } else { 2063 anon_free(amp->ahp, an_idx, 2064 len); 2065 } 2066 } else { 2067 ASSERT(svd->type == MAP_SHARED); 2068 ASSERT(amp->a_szc > seg->s_szc); 2069 anon_shmap_free_pages(amp, an_idx, len); 2070 } 2071 2072 /* 2073 * Unreserve swap space for the 2074 * unmapped chunk of this segment in 2075 * case it's MAP_SHARED 2076 */ 2077 if (svd->type == MAP_SHARED) { 2078 anon_unresv(len); 2079 amp->swresv -= len; 2080 } 2081 } 2082 nsvd->anon_index = svd->anon_index + 2083 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2084 if (svd->type == MAP_SHARED) { 2085 amp->refcnt++; 2086 nsvd->amp = amp; 2087 } else { 2088 struct anon_map *namp; 2089 struct anon_hdr *nahp; 2090 2091 ASSERT(svd->type == MAP_PRIVATE); 2092 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2093 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2094 namp->a_szc = seg->s_szc; 2095 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2096 0, btop(seg->s_size), ANON_SLEEP); 2097 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2098 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2099 anon_release(amp->ahp, btop(amp->size)); 2100 svd->anon_index = 0; 2101 nsvd->anon_index = 0; 2102 amp->ahp = nahp; 2103 amp->size = seg->s_size; 2104 nsvd->amp = namp; 2105 } 2106 ANON_LOCK_EXIT(&->a_rwlock); 2107 } 2108 if (svd->swresv) { 2109 if (svd->flags & MAP_NORESERVE) { 2110 ASSERT(amp); 2111 oswresv = svd->swresv; 2112 svd->swresv = ptob(anon_pages(amp->ahp, 2113 svd->anon_index, btop(seg->s_size))); 2114 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2115 nsvd->anon_index, btop(nseg->s_size))); 2116 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2117 anon_unresv(oswresv - (svd->swresv + nsvd->swresv)); 2118 } else { 2119 if (seg->s_size + nseg->s_size + len != svd->swresv) { 2120 panic("segvn_unmap: " 2121 "cannot split swap reservation"); 2122 /*NOTREACHED*/ 2123 } 2124 anon_unresv(len); 2125 svd->swresv = seg->s_size; 2126 nsvd->swresv = nseg->s_size; 2127 } 2128 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2129 seg, len, 0); 2130 } 2131 2132 return (0); /* I'm glad that's all over with! */ 2133 } 2134 2135 static void 2136 segvn_free(struct seg *seg) 2137 { 2138 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2139 pgcnt_t npages = seg_pages(seg); 2140 struct anon_map *amp; 2141 size_t len; 2142 2143 /* 2144 * We don't need any segment level locks for "segvn" data 2145 * since the address space is "write" locked. 2146 */ 2147 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2148 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2149 2150 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2151 2152 /* 2153 * Be sure to unlock pages. XXX Why do things get free'ed instead 2154 * of unmapped? XXX 2155 */ 2156 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2157 0, MC_UNLOCK, NULL, 0); 2158 2159 /* 2160 * Deallocate the vpage and anon pointers if necessary and possible. 2161 */ 2162 if (svd->vpage != NULL) { 2163 kmem_free(svd->vpage, vpgtob(npages)); 2164 svd->vpage = NULL; 2165 } 2166 if ((amp = svd->amp) != NULL) { 2167 /* 2168 * If there are no more references to this anon_map 2169 * structure, then deallocate the structure after freeing 2170 * up all the anon slot pointers that we can. 
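 *
 * Three cases are handled below (descriptive summary only):
 *
 *	- last reference dropped: free the anon slots (this segment's
 *	  worth for MAP_PRIVATE, the whole anon_map plus its swap
 *	  reservation for MAP_SHARED) and then anonmap_free() the
 *	  structure itself;
 *	- anon_map still referenced, MAP_PRIVATE: free only the slots
 *	  this segment was using;
 *	- anon_map still referenced, MAP_SHARED: nothing to free here.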
2171 */ 2172 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2173 ASSERT(amp->a_szc >= seg->s_szc); 2174 if (--amp->refcnt == 0) { 2175 if (svd->type == MAP_PRIVATE) { 2176 /* 2177 * Private - we only need to anon_free 2178 * the part that this segment refers to. 2179 */ 2180 if (seg->s_szc != 0) { 2181 anon_free_pages(amp->ahp, 2182 svd->anon_index, seg->s_size, 2183 seg->s_szc); 2184 } else { 2185 anon_free(amp->ahp, svd->anon_index, 2186 seg->s_size); 2187 } 2188 } else { 2189 /* 2190 * Shared - anon_free the entire 2191 * anon_map's worth of stuff and 2192 * release any swap reservation. 2193 */ 2194 if (amp->a_szc != 0) { 2195 anon_shmap_free_pages(amp, 0, 2196 amp->size); 2197 } else { 2198 anon_free(amp->ahp, 0, amp->size); 2199 } 2200 if ((len = amp->swresv) != 0) { 2201 anon_unresv(len); 2202 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2203 "anon proc:%p %lu %u", 2204 seg, len, 0); 2205 } 2206 } 2207 svd->amp = NULL; 2208 ANON_LOCK_EXIT(&->a_rwlock); 2209 anonmap_free(amp); 2210 } else if (svd->type == MAP_PRIVATE) { 2211 /* 2212 * We had a private mapping which still has 2213 * a held anon_map so just free up all the 2214 * anon slot pointers that we were using. 2215 */ 2216 if (seg->s_szc != 0) { 2217 anon_free_pages(amp->ahp, svd->anon_index, 2218 seg->s_size, seg->s_szc); 2219 } else { 2220 anon_free(amp->ahp, svd->anon_index, 2221 seg->s_size); 2222 } 2223 ANON_LOCK_EXIT(&->a_rwlock); 2224 } else { 2225 ANON_LOCK_EXIT(&->a_rwlock); 2226 } 2227 } 2228 2229 /* 2230 * Release swap reservation. 2231 */ 2232 if ((len = svd->swresv) != 0) { 2233 anon_unresv(svd->swresv); 2234 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2235 seg, len, 0); 2236 svd->swresv = 0; 2237 } 2238 /* 2239 * Release claim on vnode, credentials, and finally free the 2240 * private data. 2241 */ 2242 if (svd->vp != NULL) { 2243 if (svd->type == MAP_SHARED) 2244 lgrp_shm_policy_fini(NULL, svd->vp); 2245 VN_RELE(svd->vp); 2246 svd->vp = NULL; 2247 } 2248 crfree(svd->cred); 2249 svd->cred = NULL; 2250 2251 seg->s_data = NULL; 2252 kmem_cache_free(segvn_cache, svd); 2253 } 2254 2255 #ifdef DEBUG 2256 uint32_t segvn_slock_mtbf = 0; 2257 #endif 2258 2259 ulong_t segvn_lpglck_limit = 0; 2260 2261 /* 2262 * Support routines used by segvn_pagelock() and softlock faults for anonymous 2263 * pages to implement availrmem accounting in a way that makes sure the 2264 * same memory is accounted just once for all softlock/pagelock purposes. 2265 * This prevents a bug when availrmem is quickly incorrectly exausted from 2266 * several pagelocks to different parts of the same large page since each 2267 * pagelock has to decrement availrmem by the size of the entire large 2268 * page. Note those pages are not COW shared until softunlock/pageunlock so 2269 * we don't need to use cow style accounting here. We also need to make sure 2270 * the entire large page is accounted even if softlock range is less than the 2271 * entire large page because large anon pages can't be demoted when any of 2272 * constituent pages is locked. The caller calls this routine for every page_t 2273 * it locks. The very first page in the range may not be the root page of a 2274 * large page. For all other pages it's guranteed we are going to visit the 2275 * root of a particular large page before any other constituent page as we are 2276 * locking sequential pages belonging to the same anon map. So we do all the 2277 * locking when the root is encountered except for the very first page. 
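 *
 * For example (numbers are illustrative only): with a large page of
 * pages = page_get_pagecnt(pp->p_szc) == 8 constituents whose root is
 * at pfn 0x100, a call for the constituent at pfn 0x103 backs up to
 * the root with
 *
 *	pp = &pp[-(spgcnt_t)(pfn & (pages - 1))];
 *
 * i.e. by 0x103 & 7 == 3 page_t entries, and availrmem is debited for
 * all 8 pages exactly once, when the root's p_slckcnt first goes from
 * 0 to 1.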
Since 2278 * softlocking is not supported (except S_READ_NOCOW special case) for vmpss 2279 * segments and since vnode pages can be demoted without locking all 2280 * constituent pages vnode pages don't come here. Unlocking relies on the 2281 * fact that pagesize can't change whenever any of constituent large pages is 2282 * locked at least SE_SHARED. This allows unlocking code to find the right 2283 * root and decrement availrmem by the same amount it was incremented when the 2284 * page was locked. 2285 */ 2286 static int 2287 segvn_slock_anonpages(page_t *pp, int first) 2288 { 2289 pgcnt_t pages; 2290 pfn_t pfn; 2291 uchar_t szc = pp->p_szc; 2292 2293 ASSERT(PAGE_LOCKED(pp)); 2294 ASSERT(pp->p_vnode != NULL); 2295 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2296 2297 /* 2298 * pagesize won't change as long as any constituent page is locked. 2299 */ 2300 pages = page_get_pagecnt(pp->p_szc); 2301 pfn = page_pptonum(pp); 2302 2303 if (!first) { 2304 if (!IS_P2ALIGNED(pfn, pages)) { 2305 #ifdef DEBUG 2306 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2307 pfn = page_pptonum(pp); 2308 ASSERT(IS_P2ALIGNED(pfn, pages)); 2309 ASSERT(pp->p_szc == szc); 2310 ASSERT(pp->p_vnode != NULL); 2311 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2312 ASSERT(pp->p_slckcnt != 0); 2313 #endif /* DEBUG */ 2314 return (1); 2315 } 2316 } else if (!IS_P2ALIGNED(pfn, pages)) { 2317 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2318 #ifdef DEBUG 2319 pfn = page_pptonum(pp); 2320 ASSERT(IS_P2ALIGNED(pfn, pages)); 2321 ASSERT(pp->p_szc == szc); 2322 ASSERT(pp->p_vnode != NULL); 2323 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2324 #endif /* DEBUG */ 2325 } 2326 2327 #ifdef DEBUG 2328 if (segvn_slock_mtbf && !(gethrtime() % segvn_slock_mtbf)) { 2329 return (0); 2330 } 2331 #endif /* DEBUG */ 2332 2333 /* 2334 * pp is a root page. 2335 * We haven't locked this large page yet. 2336 */ 2337 page_struct_lock(pp); 2338 if (pp->p_slckcnt != 0) { 2339 if (pp->p_slckcnt < PAGE_SLOCK_MAXIMUM) { 2340 pp->p_slckcnt++; 2341 page_struct_unlock(pp); 2342 return (1); 2343 } 2344 page_struct_unlock(pp); 2345 segvn_lpglck_limit++; 2346 return (0); 2347 } 2348 mutex_enter(&freemem_lock); 2349 if (availrmem < tune.t_minarmem + pages) { 2350 mutex_exit(&freemem_lock); 2351 page_struct_unlock(pp); 2352 return (0); 2353 } 2354 pp->p_slckcnt++; 2355 availrmem -= pages; 2356 mutex_exit(&freemem_lock); 2357 page_struct_unlock(pp); 2358 return (1); 2359 } 2360 2361 static void 2362 segvn_sunlock_anonpages(page_t *pp, int first) 2363 { 2364 pgcnt_t pages; 2365 pfn_t pfn; 2366 2367 ASSERT(PAGE_LOCKED(pp)); 2368 ASSERT(pp->p_vnode != NULL); 2369 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2370 2371 /* 2372 * pagesize won't change as long as any constituent page is locked. 2373 */ 2374 pages = page_get_pagecnt(pp->p_szc); 2375 pfn = page_pptonum(pp); 2376 2377 if (!first) { 2378 if (!IS_P2ALIGNED(pfn, pages)) { 2379 return; 2380 } 2381 } else if (!IS_P2ALIGNED(pfn, pages)) { 2382 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2383 #ifdef DEBUG 2384 pfn = page_pptonum(pp); 2385 ASSERT(IS_P2ALIGNED(pfn, pages)); 2386 #endif /* DEBUG */ 2387 } 2388 ASSERT(pp->p_vnode != NULL); 2389 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2390 ASSERT(pp->p_slckcnt != 0); 2391 page_struct_lock(pp); 2392 if (--pp->p_slckcnt == 0) { 2393 mutex_enter(&freemem_lock); 2394 availrmem += pages; 2395 mutex_exit(&freemem_lock); 2396 } 2397 page_struct_unlock(pp); 2398 } 2399 2400 /* 2401 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2402 * already been F_SOFTLOCK'ed. 
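 *
 * A minimal sketch of the expected pairing (the caller shown is
 * hypothetical; real callers arrive through the segment fault entry
 * point):
 *
 *	(void) SEGOP_FAULT(hat, seg, addr, len, F_SOFTLOCK, rw);
 *	... access the locked range ...
 *	(void) SEGOP_FAULT(hat, seg, addr, len, F_SOFTUNLOCK, rw);
 *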
2403 * Caller must always match addr and len of a softunlock with a previous 2404 * softlock with exactly the same addr and len. 2405 */ 2406 static void 2407 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2408 { 2409 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2410 page_t *pp; 2411 caddr_t adr; 2412 struct vnode *vp; 2413 u_offset_t offset; 2414 ulong_t anon_index; 2415 struct anon_map *amp; 2416 struct anon *ap = NULL; 2417 2418 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2419 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2420 2421 if ((amp = svd->amp) != NULL) 2422 anon_index = svd->anon_index + seg_page(seg, addr); 2423 2424 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2425 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2426 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2427 } else { 2428 hat_unlock(seg->s_as->a_hat, addr, len); 2429 } 2430 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2431 if (amp != NULL) { 2432 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2433 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2434 != NULL) { 2435 swap_xlate(ap, &vp, &offset); 2436 } else { 2437 vp = svd->vp; 2438 offset = svd->offset + 2439 (uintptr_t)(adr - seg->s_base); 2440 } 2441 ANON_LOCK_EXIT(&->a_rwlock); 2442 } else { 2443 vp = svd->vp; 2444 offset = svd->offset + 2445 (uintptr_t)(adr - seg->s_base); 2446 } 2447 2448 /* 2449 * Use page_find() instead of page_lookup() to 2450 * find the page since we know that it is locked. 2451 */ 2452 pp = page_find(vp, offset); 2453 if (pp == NULL) { 2454 panic( 2455 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2456 (void *)adr, (void *)ap, (void *)vp, offset); 2457 /*NOTREACHED*/ 2458 } 2459 2460 if (rw == S_WRITE) { 2461 hat_setrefmod(pp); 2462 if (seg->s_as->a_vbits) 2463 hat_setstat(seg->s_as, adr, PAGESIZE, 2464 P_REF | P_MOD); 2465 } else if (rw != S_OTHER) { 2466 hat_setref(pp); 2467 if (seg->s_as->a_vbits) 2468 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2469 } 2470 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2471 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2472 if (svd->vp == NULL) { 2473 segvn_sunlock_anonpages(pp, adr == addr); 2474 } 2475 page_unlock(pp); 2476 } 2477 mutex_enter(&freemem_lock); /* for availrmem */ 2478 if (svd->vp != NULL) { 2479 availrmem += btop(len); 2480 } 2481 segvn_pages_locked -= btop(len); 2482 svd->softlockcnt -= btop(len); 2483 mutex_exit(&freemem_lock); 2484 if (svd->softlockcnt == 0) { 2485 /* 2486 * All SOFTLOCKS are gone. Wakeup any waiting 2487 * unmappers so they can try again to unmap. 2488 * Check for waiters first without the mutex 2489 * held so we don't always grab the mutex on 2490 * softunlocks. 2491 */ 2492 if (AS_ISUNMAPWAIT(seg->s_as)) { 2493 mutex_enter(&seg->s_as->a_contents); 2494 if (AS_ISUNMAPWAIT(seg->s_as)) { 2495 AS_CLRUNMAPWAIT(seg->s_as); 2496 cv_broadcast(&seg->s_as->a_cv); 2497 } 2498 mutex_exit(&seg->s_as->a_contents); 2499 } 2500 } 2501 } 2502 2503 #define PAGE_HANDLED ((page_t *)-1) 2504 2505 /* 2506 * Release all the pages in the NULL terminated ppp list 2507 * which haven't already been converted to PAGE_HANDLED. 2508 */ 2509 static void 2510 segvn_pagelist_rele(page_t **ppp) 2511 { 2512 for (; *ppp != NULL; ppp++) { 2513 if (*ppp != PAGE_HANDLED) 2514 page_unlock(*ppp); 2515 } 2516 } 2517 2518 static int stealcow = 1; 2519 2520 /* 2521 * Workaround for viking chip bug. See bug id 1220902. 
2522 * To fix this down in pagefault() would require importing so 2523 * much as and segvn code as to be unmaintainable. 2524 */ 2525 int enable_mbit_wa = 0; 2526 2527 /* 2528 * Handles all the dirty work of getting the right 2529 * anonymous pages and loading up the translations. 2530 * This routine is called only from segvn_fault() 2531 * when looping over the range of addresses requested. 2532 * 2533 * The basic algorithm here is: 2534 * If this is an anon_zero case 2535 * Call anon_zero to allocate page 2536 * Load up translation 2537 * Return 2538 * endif 2539 * If this is an anon page 2540 * Use anon_getpage to get the page 2541 * else 2542 * Find page in pl[] list passed in 2543 * endif 2544 * If not a cow 2545 * Load up the translation to the page 2546 * return 2547 * endif 2548 * Call anon_private to handle cow 2549 * Load up (writable) translation to new page 2550 */ 2551 static faultcode_t 2552 segvn_faultpage( 2553 struct hat *hat, /* the hat to use for mapping */ 2554 struct seg *seg, /* seg_vn of interest */ 2555 caddr_t addr, /* address in as */ 2556 u_offset_t off, /* offset in vp */ 2557 struct vpage *vpage, /* pointer to vpage for vp, off */ 2558 page_t *pl[], /* object source page pointer */ 2559 uint_t vpprot, /* access allowed to object pages */ 2560 enum fault_type type, /* type of fault */ 2561 enum seg_rw rw, /* type of access at fault */ 2562 int brkcow, /* we may need to break cow */ 2563 int first) /* first page for this fault if 1 */ 2564 { 2565 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2566 page_t *pp, **ppp; 2567 uint_t pageflags = 0; 2568 page_t *anon_pl[1 + 1]; 2569 page_t *opp = NULL; /* original page */ 2570 uint_t prot; 2571 int err; 2572 int cow; 2573 int claim; 2574 int steal = 0; 2575 ulong_t anon_index; 2576 struct anon *ap, *oldap; 2577 struct anon_map *amp; 2578 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2579 int anon_lock = 0; 2580 anon_sync_obj_t cookie; 2581 2582 if (svd->flags & MAP_TEXT) { 2583 hat_flag |= HAT_LOAD_TEXT; 2584 } 2585 2586 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2587 ASSERT(seg->s_szc == 0); 2588 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2589 2590 /* 2591 * Initialize protection value for this page. 2592 * If we have per page protection values check it now. 2593 */ 2594 if (svd->pageprot) { 2595 uint_t protchk; 2596 2597 switch (rw) { 2598 case S_READ: 2599 protchk = PROT_READ; 2600 break; 2601 case S_WRITE: 2602 protchk = PROT_WRITE; 2603 break; 2604 case S_EXEC: 2605 protchk = PROT_EXEC; 2606 break; 2607 case S_OTHER: 2608 default: 2609 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2610 break; 2611 } 2612 2613 prot = VPP_PROT(vpage); 2614 if ((prot & protchk) == 0) 2615 return (FC_PROT); /* illegal access type */ 2616 } else { 2617 prot = svd->prot; 2618 } 2619 2620 if (type == F_SOFTLOCK && svd->vp != NULL) { 2621 mutex_enter(&freemem_lock); 2622 if (availrmem <= tune.t_minarmem) { 2623 mutex_exit(&freemem_lock); 2624 return (FC_MAKE_ERR(ENOMEM)); /* out of real memory */ 2625 } else { 2626 availrmem--; 2627 svd->softlockcnt++; 2628 segvn_pages_locked++; 2629 } 2630 mutex_exit(&freemem_lock); 2631 } 2632 2633 /* 2634 * Always acquire the anon array lock to prevent 2 threads from 2635 * allocating separate anon slots for the same "addr". 
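 *
 * The code below follows a check-then-allocate pattern under that
 * per-slot lock (a descriptive summary of what follows, not new
 * behavior):
 *
 *	anon_array_enter(amp, anon_index, &cookie);
 *	if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
 *		pp = anon_zero(seg, addr, &ap, svd->cred);
 *		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
 *	}
 *	...
 *	anon_array_exit(&cookie);
 *
 * so two threads faulting the same address cannot each install a
 * different anon slot.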
2636 */ 2637 2638 if ((amp = svd->amp) != NULL) { 2639 ASSERT(RW_READ_HELD(&->a_rwlock)); 2640 anon_index = svd->anon_index + seg_page(seg, addr); 2641 anon_array_enter(amp, anon_index, &cookie); 2642 anon_lock = 1; 2643 } 2644 2645 if (svd->vp == NULL && amp != NULL) { 2646 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2647 /* 2648 * Allocate a (normally) writable anonymous page of 2649 * zeroes. If no advance reservations, reserve now. 2650 */ 2651 if (svd->flags & MAP_NORESERVE) { 2652 if (anon_resv_zone(ptob(1), 2653 seg->s_as->a_proc->p_zone)) { 2654 atomic_add_long(&svd->swresv, ptob(1)); 2655 } else { 2656 err = ENOMEM; 2657 goto out; 2658 } 2659 } 2660 if ((pp = anon_zero(seg, addr, &ap, 2661 svd->cred)) == NULL) { 2662 err = ENOMEM; 2663 goto out; /* out of swap space */ 2664 } 2665 /* 2666 * Re-acquire the anon_map lock and 2667 * initialize the anon array entry. 2668 */ 2669 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2670 ANON_SLEEP); 2671 2672 ASSERT(pp->p_szc == 0); 2673 2674 /* 2675 * Handle pages that have been marked for migration 2676 */ 2677 if (lgrp_optimizations()) 2678 page_migrate(seg, addr, &pp, 1); 2679 2680 if (type == F_SOFTLOCK) { 2681 if (!segvn_slock_anonpages(pp, first)) { 2682 page_unlock(pp); 2683 err = ENOMEM; 2684 goto out; 2685 } else { 2686 mutex_enter(&freemem_lock); 2687 svd->softlockcnt++; 2688 segvn_pages_locked++; 2689 mutex_exit(&freemem_lock); 2690 } 2691 } 2692 2693 if (enable_mbit_wa) { 2694 if (rw == S_WRITE) 2695 hat_setmod(pp); 2696 else if (!hat_ismod(pp)) 2697 prot &= ~PROT_WRITE; 2698 } 2699 /* 2700 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2701 * with MC_LOCKAS, MCL_FUTURE) and this is a 2702 * MAP_NORESERVE segment, we may need to 2703 * permanently lock the page as it is being faulted 2704 * for the first time. The following text applies 2705 * only to MAP_NORESERVE segments: 2706 * 2707 * As per memcntl(2), if this segment was created 2708 * after MCL_FUTURE was applied (a "future" 2709 * segment), its pages must be locked. If this 2710 * segment existed at MCL_FUTURE application (a 2711 * "past" segment), the interface is unclear. 2712 * 2713 * We decide to lock only if vpage is present: 2714 * 2715 * - "future" segments will have a vpage array (see 2716 * as_map), and so will be locked as required 2717 * 2718 * - "past" segments may not have a vpage array, 2719 * depending on whether events (such as 2720 * mprotect) have occurred. Locking if vpage 2721 * exists will preserve legacy behavior. Not 2722 * locking if vpage is absent, will not break 2723 * the interface or legacy behavior. Note that 2724 * allocating vpage here if it's absent requires 2725 * upgrading the segvn reader lock, the cost of 2726 * which does not seem worthwhile. 2727 * 2728 * Usually testing and setting VPP_ISPPLOCK and 2729 * VPP_SETPPLOCK requires holding the segvn lock as 2730 * writer, but in this case all readers are 2731 * serializing on the anon array lock. 
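 *
 * Illustrative user-level sequence that reaches the locking below
 * (hypothetical example; flag spellings per memcntl(2) and mmap(2)):
 *
 *	(void) memcntl(NULL, 0, MC_LOCKAS, (caddr_t)MCL_FUTURE, 0, 0);
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
 *	p[0] = 1;	first touch faults in a zero page here and
 *			page_pp_lock()s it, charging the locked-memory
 *			resource control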
2732 */ 2733 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2734 (svd->flags & MAP_NORESERVE) && 2735 !VPP_ISPPLOCK(vpage)) { 2736 proc_t *p = seg->s_as->a_proc; 2737 ASSERT(svd->type == MAP_PRIVATE); 2738 mutex_enter(&p->p_lock); 2739 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2740 1) == 0) { 2741 claim = VPP_PROT(vpage) & PROT_WRITE; 2742 if (page_pp_lock(pp, claim, 0)) { 2743 VPP_SETPPLOCK(vpage); 2744 } else { 2745 rctl_decr_locked_mem(p, NULL, 2746 PAGESIZE, 1); 2747 } 2748 } 2749 mutex_exit(&p->p_lock); 2750 } 2751 2752 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2753 hat_memload(hat, addr, pp, prot, hat_flag); 2754 2755 if (!(hat_flag & HAT_LOAD_LOCK)) 2756 page_unlock(pp); 2757 2758 anon_array_exit(&cookie); 2759 return (0); 2760 } 2761 } 2762 2763 /* 2764 * Obtain the page structure via anon_getpage() if it is 2765 * a private copy of an object (the result of a previous 2766 * copy-on-write). 2767 */ 2768 if (amp != NULL) { 2769 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2770 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2771 seg, addr, rw, svd->cred); 2772 if (err) 2773 goto out; 2774 2775 if (svd->type == MAP_SHARED) { 2776 /* 2777 * If this is a shared mapping to an 2778 * anon_map, then ignore the write 2779 * permissions returned by anon_getpage(). 2780 * They apply to the private mappings 2781 * of this anon_map. 2782 */ 2783 vpprot |= PROT_WRITE; 2784 } 2785 opp = anon_pl[0]; 2786 } 2787 } 2788 2789 /* 2790 * Search the pl[] list passed in if it is from the 2791 * original object (i.e., not a private copy). 2792 */ 2793 if (opp == NULL) { 2794 /* 2795 * Find original page. We must be bringing it in 2796 * from the list in pl[]. 2797 */ 2798 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2799 if (opp == PAGE_HANDLED) 2800 continue; 2801 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2802 if (opp->p_offset == off) 2803 break; 2804 } 2805 if (opp == NULL) { 2806 panic("segvn_faultpage not found"); 2807 /*NOTREACHED*/ 2808 } 2809 *ppp = PAGE_HANDLED; 2810 2811 } 2812 2813 ASSERT(PAGE_LOCKED(opp)); 2814 2815 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2816 "segvn_fault:pp %p vp %p offset %llx", 2817 opp, NULL, 0); 2818 2819 /* 2820 * The fault is treated as a copy-on-write fault if a 2821 * write occurs on a private segment and the object 2822 * page (i.e., mapping) is write protected. We assume 2823 * that fatal protection checks have already been made. 2824 */ 2825 2826 if (brkcow) { 2827 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2828 cow = !(vpprot & PROT_WRITE); 2829 } else if (svd->tr_state == SEGVN_TR_ON) { 2830 /* 2831 * If we are doing text replication COW on first touch. 2832 */ 2833 ASSERT(amp != NULL); 2834 ASSERT(svd->vp != NULL); 2835 ASSERT(rw != S_WRITE); 2836 cow = (ap == NULL); 2837 } else { 2838 cow = 0; 2839 } 2840 2841 /* 2842 * If not a copy-on-write case load the translation 2843 * and return. 
2844 */ 2845 if (cow == 0) { 2846 2847 /* 2848 * Handle pages that have been marked for migration 2849 */ 2850 if (lgrp_optimizations()) 2851 page_migrate(seg, addr, &opp, 1); 2852 2853 if (type == F_SOFTLOCK && svd->vp == NULL) { 2854 2855 ASSERT(opp->p_szc == 0 || 2856 (svd->type == MAP_SHARED && 2857 amp != NULL && amp->a_szc != 0)); 2858 2859 if (!segvn_slock_anonpages(opp, first)) { 2860 page_unlock(opp); 2861 err = ENOMEM; 2862 goto out; 2863 } else { 2864 mutex_enter(&freemem_lock); 2865 svd->softlockcnt++; 2866 segvn_pages_locked++; 2867 mutex_exit(&freemem_lock); 2868 } 2869 } 2870 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2871 if (rw == S_WRITE) 2872 hat_setmod(opp); 2873 else if (rw != S_OTHER && !hat_ismod(opp)) 2874 prot &= ~PROT_WRITE; 2875 } 2876 2877 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2878 (!svd->pageprot && svd->prot == (prot & vpprot))); 2879 ASSERT(amp == NULL || 2880 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2881 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2882 svd->rcookie); 2883 2884 if (!(hat_flag & HAT_LOAD_LOCK)) 2885 page_unlock(opp); 2886 2887 if (anon_lock) { 2888 anon_array_exit(&cookie); 2889 } 2890 return (0); 2891 } 2892 2893 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2894 2895 hat_setref(opp); 2896 2897 ASSERT(amp != NULL && anon_lock); 2898 2899 /* 2900 * Steal the page only if it isn't a private page 2901 * since stealing a private page is not worth the effort. 2902 */ 2903 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2904 steal = 1; 2905 2906 /* 2907 * Steal the original page if the following conditions are true: 2908 * 2909 * We are low on memory, the page is not private, page is not large, 2910 * not shared, not modified, not `locked' or if we have it `locked' 2911 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2912 * that the page is not shared) and if it doesn't have any 2913 * translations. page_struct_lock isn't needed to look at p_cowcnt 2914 * and p_lckcnt because we first get exclusive lock on page. 2915 */ 2916 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2917 2918 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2919 page_tryupgrade(opp) && !hat_ismod(opp) && 2920 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2921 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2922 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2923 /* 2924 * Check if this page has other translations 2925 * after unloading our translation. 2926 */ 2927 if (hat_page_is_mapped(opp)) { 2928 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2929 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 2930 HAT_UNLOAD); 2931 } 2932 2933 /* 2934 * hat_unload() might sync back someone else's recent 2935 * modification, so check again. 2936 */ 2937 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 2938 pageflags |= STEAL_PAGE; 2939 } 2940 2941 /* 2942 * If we have a vpage pointer, see if it indicates that we have 2943 * ``locked'' the page we map -- if so, tell anon_private to 2944 * transfer the locking resource to the new page. 2945 * 2946 * See Statement at the beginning of segvn_lockop regarding 2947 * the way lockcnts/cowcnts are handled during COW. 2948 * 2949 */ 2950 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 2951 pageflags |= LOCK_PAGE; 2952 2953 /* 2954 * Allocate a private page and perform the copy. 2955 * For MAP_NORESERVE reserve swap space now, unless this 2956 * is a cow fault on an existing anon page in which case 2957 * MAP_NORESERVE will have made advance reservations. 
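 *
 * The copy-on-write handoff performed below, in outline (descriptive
 * summary of the code that follows):
 *
 *	oldap = ap;
 *	pp = anon_private(&ap, seg, addr, prot, opp, pageflags,
 *	    svd->cred);
 *	if (oldap != NULL)
 *		anon_decref(oldap);	drop the old slot while the
 *					anon_map lock is still held
 *	(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
 *	hat_memload(hat, addr, pp, prot, hat_flag);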
2958 */ 2959 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 2960 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 2961 atomic_add_long(&svd->swresv, ptob(1)); 2962 } else { 2963 page_unlock(opp); 2964 err = ENOMEM; 2965 goto out; 2966 } 2967 } 2968 oldap = ap; 2969 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 2970 if (pp == NULL) { 2971 err = ENOMEM; /* out of swap space */ 2972 goto out; 2973 } 2974 2975 /* 2976 * If we copied away from an anonymous page, then 2977 * we are one step closer to freeing up an anon slot. 2978 * 2979 * NOTE: The original anon slot must be released while 2980 * holding the "anon_map" lock. This is necessary to prevent 2981 * other threads from obtaining a pointer to the anon slot 2982 * which may be freed if its "refcnt" is 1. 2983 */ 2984 if (oldap != NULL) 2985 anon_decref(oldap); 2986 2987 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 2988 2989 /* 2990 * Handle pages that have been marked for migration 2991 */ 2992 if (lgrp_optimizations()) 2993 page_migrate(seg, addr, &pp, 1); 2994 2995 ASSERT(pp->p_szc == 0); 2996 if (type == F_SOFTLOCK && svd->vp == NULL) { 2997 if (!segvn_slock_anonpages(pp, first)) { 2998 page_unlock(pp); 2999 err = ENOMEM; 3000 goto out; 3001 } else { 3002 mutex_enter(&freemem_lock); 3003 svd->softlockcnt++; 3004 segvn_pages_locked++; 3005 mutex_exit(&freemem_lock); 3006 } 3007 } 3008 3009 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3010 if (enable_mbit_wa) { 3011 if (rw == S_WRITE) 3012 hat_setmod(pp); 3013 else if (!hat_ismod(pp)) 3014 prot &= ~PROT_WRITE; 3015 } 3016 3017 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3018 hat_memload(hat, addr, pp, prot, hat_flag); 3019 3020 if (!(hat_flag & HAT_LOAD_LOCK)) 3021 page_unlock(pp); 3022 3023 ASSERT(anon_lock); 3024 anon_array_exit(&cookie); 3025 return (0); 3026 out: 3027 if (anon_lock) 3028 anon_array_exit(&cookie); 3029 3030 if (type == F_SOFTLOCK && svd->vp != NULL) { 3031 mutex_enter(&freemem_lock); 3032 availrmem++; 3033 segvn_pages_locked--; 3034 svd->softlockcnt--; 3035 mutex_exit(&freemem_lock); 3036 } 3037 return (FC_MAKE_ERR(err)); 3038 } 3039 3040 /* 3041 * relocate a bunch of smaller targ pages into one large repl page. all targ 3042 * pages must be complete pages smaller than replacement pages. 3043 * it's assumed that no page's szc can change since they are all PAGESIZE or 3044 * complete large pages locked SHARED. 
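 *
 * For example (sizes are illustrative): if the replacement page has
 * 8 constituent pages and targ[] holds one 4-page group followed by
 * four szc-0 pages, the loop below issues five page_relocate() calls
 * consuming 4, 1, 1, 1 and 1 replacement pages; afterwards targ[0..7]
 * are rewritten to point at the 8 consecutive replacement constituents
 * and each is downgraded from the exclusive to a shared lock.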
3045 */ 3046 static void 3047 segvn_relocate_pages(page_t **targ, page_t *replacement) 3048 { 3049 page_t *pp; 3050 pgcnt_t repl_npgs, curnpgs; 3051 pgcnt_t i; 3052 uint_t repl_szc = replacement->p_szc; 3053 page_t *first_repl = replacement; 3054 page_t *repl; 3055 spgcnt_t npgs; 3056 3057 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3058 3059 ASSERT(repl_szc != 0); 3060 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3061 3062 i = 0; 3063 while (repl_npgs) { 3064 spgcnt_t nreloc; 3065 int err; 3066 ASSERT(replacement != NULL); 3067 pp = targ[i]; 3068 ASSERT(pp->p_szc < repl_szc); 3069 ASSERT(PAGE_EXCL(pp)); 3070 ASSERT(!PP_ISFREE(pp)); 3071 curnpgs = page_get_pagecnt(pp->p_szc); 3072 if (curnpgs == 1) { 3073 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3074 repl = replacement; 3075 page_sub(&replacement, repl); 3076 ASSERT(PAGE_EXCL(repl)); 3077 ASSERT(!PP_ISFREE(repl)); 3078 ASSERT(repl->p_szc == repl_szc); 3079 } else { 3080 page_t *repl_savepp; 3081 int j; 3082 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3083 repl_savepp = replacement; 3084 for (j = 0; j < curnpgs; j++) { 3085 repl = replacement; 3086 page_sub(&replacement, repl); 3087 ASSERT(PAGE_EXCL(repl)); 3088 ASSERT(!PP_ISFREE(repl)); 3089 ASSERT(repl->p_szc == repl_szc); 3090 ASSERT(page_pptonum(targ[i + j]) == 3091 page_pptonum(targ[i]) + j); 3092 } 3093 repl = repl_savepp; 3094 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3095 } 3096 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3097 if (err || nreloc != curnpgs) { 3098 panic("segvn_relocate_pages: " 3099 "page_relocate failed err=%d curnpgs=%ld " 3100 "nreloc=%ld", err, curnpgs, nreloc); 3101 } 3102 ASSERT(curnpgs <= repl_npgs); 3103 repl_npgs -= curnpgs; 3104 i += curnpgs; 3105 } 3106 ASSERT(replacement == NULL); 3107 3108 repl = first_repl; 3109 repl_npgs = npgs; 3110 for (i = 0; i < repl_npgs; i++) { 3111 ASSERT(PAGE_EXCL(repl)); 3112 ASSERT(!PP_ISFREE(repl)); 3113 targ[i] = repl; 3114 page_downgrade(targ[i]); 3115 repl++; 3116 } 3117 } 3118 3119 /* 3120 * Check if all pages in ppa array are complete smaller than szc pages and 3121 * their roots will still be aligned relative to their current size if the 3122 * entire ppa array is relocated into one szc page. If these conditions are 3123 * not met return 0. 3124 * 3125 * If all pages are properly aligned attempt to upgrade their locks 3126 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3127 * upgrdfail was set to 0 by caller. 3128 * 3129 * Return 1 if all pages are aligned and locked exclusively. 3130 * 3131 * If all pages in ppa array happen to be physically contiguous to make one 3132 * szc page and all exclusive locks are successfully obtained promote the page 3133 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
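 *
 * Alignment example (illustrative): for a target szc with
 * totnpgs == 8, any constituent group of intermediate size curszc
 * must satisfy both
 *
 *	IS_P2ALIGNED(pfn, page_get_pagecnt(curszc)) and
 *	IS_P2ALIGNED(i, page_get_pagecnt(curszc))
 *
 * so its root keeps the same relative position inside one szc page;
 * the in-place promotion at the end additionally requires the first
 * pfn to be 8-aligned and the remaining pfns consecutive.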
3134 */ 3135 static int 3136 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3137 { 3138 page_t *pp; 3139 pfn_t pfn; 3140 pgcnt_t totnpgs = page_get_pagecnt(szc); 3141 pfn_t first_pfn; 3142 int contig = 1; 3143 pgcnt_t i; 3144 pgcnt_t j; 3145 uint_t curszc; 3146 pgcnt_t curnpgs; 3147 int root = 0; 3148 3149 ASSERT(szc > 0); 3150 3151 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3152 3153 for (i = 0; i < totnpgs; i++) { 3154 pp = ppa[i]; 3155 ASSERT(PAGE_SHARED(pp)); 3156 ASSERT(!PP_ISFREE(pp)); 3157 pfn = page_pptonum(pp); 3158 if (i == 0) { 3159 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3160 contig = 0; 3161 } else { 3162 first_pfn = pfn; 3163 } 3164 } else if (contig && pfn != first_pfn + i) { 3165 contig = 0; 3166 } 3167 if (pp->p_szc == 0) { 3168 if (root) { 3169 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3170 return (0); 3171 } 3172 } else if (!root) { 3173 if ((curszc = pp->p_szc) >= szc) { 3174 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3175 return (0); 3176 } 3177 if (curszc == 0) { 3178 /* 3179 * p_szc changed means we don't have all pages 3180 * locked. return failure. 3181 */ 3182 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3183 return (0); 3184 } 3185 curnpgs = page_get_pagecnt(curszc); 3186 if (!IS_P2ALIGNED(pfn, curnpgs) || 3187 !IS_P2ALIGNED(i, curnpgs)) { 3188 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3189 return (0); 3190 } 3191 root = 1; 3192 } else { 3193 ASSERT(i > 0); 3194 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3195 if (pp->p_szc != curszc) { 3196 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3197 return (0); 3198 } 3199 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3200 panic("segvn_full_szcpages: " 3201 "large page not physically contiguous"); 3202 } 3203 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3204 root = 0; 3205 } 3206 } 3207 } 3208 3209 for (i = 0; i < totnpgs; i++) { 3210 ASSERT(ppa[i]->p_szc < szc); 3211 if (!page_tryupgrade(ppa[i])) { 3212 for (j = 0; j < i; j++) { 3213 page_downgrade(ppa[j]); 3214 } 3215 *pszc = ppa[i]->p_szc; 3216 *upgrdfail = 1; 3217 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3218 return (0); 3219 } 3220 } 3221 3222 /* 3223 * When a page is put a free cachelist its szc is set to 0. if file 3224 * system reclaimed pages from cachelist targ pages will be physically 3225 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3226 * pages without any relocations. 3227 * To avoid any hat issues with previous small mappings 3228 * hat_pageunload() the target pages first. 3229 */ 3230 if (contig) { 3231 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3232 for (i = 0; i < totnpgs; i++) { 3233 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3234 } 3235 for (i = 0; i < totnpgs; i++) { 3236 ppa[i]->p_szc = szc; 3237 } 3238 for (i = 0; i < totnpgs; i++) { 3239 ASSERT(PAGE_EXCL(ppa[i])); 3240 page_downgrade(ppa[i]); 3241 } 3242 if (pszc != NULL) { 3243 *pszc = szc; 3244 } 3245 } 3246 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3247 return (1); 3248 } 3249 3250 /* 3251 * Create physically contiguous pages for [vp, off] - [vp, off + 3252 * page_size(szc)) range and for private segment return them in ppa array. 3253 * Pages are created either via IO or relocations. 3254 * 3255 * Return 1 on sucess and 0 on failure. 3256 * 3257 * If physically contiguos pages already exist for this range return 1 without 3258 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3259 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
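 *
 * Sketch of the caller protocol (this mirrors the use in
 * segvn_fault_vnodepages() below; it is not a separate interface):
 *
 *	ppa[0] = NULL;
 *	if (segvn_fill_vp_pages(svd, vp, off, szc, ppa, &pplist,
 *	    &pszc, &downsize) && ppa[0] != NULL) {
 *		ppa[] already holds the szc worth of pages
 *	} else {
 *		fill ppa[] via VOP_GETPAGE(), retrying with a smaller
 *		szc if downsize was set
 *	}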
3260 */ 3261 3262 static int 3263 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3264 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3265 int *downsize) 3266 3267 { 3268 page_t *pplist = *ppplist; 3269 size_t pgsz = page_get_pagesize(szc); 3270 pgcnt_t pages = btop(pgsz); 3271 ulong_t start_off = off; 3272 u_offset_t eoff = off + pgsz; 3273 spgcnt_t nreloc; 3274 u_offset_t io_off = off; 3275 size_t io_len; 3276 page_t *io_pplist = NULL; 3277 page_t *done_pplist = NULL; 3278 pgcnt_t pgidx = 0; 3279 page_t *pp; 3280 page_t *newpp; 3281 page_t *targpp; 3282 int io_err = 0; 3283 int i; 3284 pfn_t pfn; 3285 ulong_t ppages; 3286 page_t *targ_pplist = NULL; 3287 page_t *repl_pplist = NULL; 3288 page_t *tmp_pplist; 3289 int nios = 0; 3290 uint_t pszc; 3291 struct vattr va; 3292 3293 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3294 3295 ASSERT(szc != 0); 3296 ASSERT(pplist->p_szc == szc); 3297 3298 /* 3299 * downsize will be set to 1 only if we fail to lock pages. this will 3300 * allow subsequent faults to try to relocate the page again. If we 3301 * fail due to misalignment don't downsize and let the caller map the 3302 * whole region with small mappings to avoid more faults into the area 3303 * where we can't get large pages anyway. 3304 */ 3305 *downsize = 0; 3306 3307 while (off < eoff) { 3308 newpp = pplist; 3309 ASSERT(newpp != NULL); 3310 ASSERT(PAGE_EXCL(newpp)); 3311 ASSERT(!PP_ISFREE(newpp)); 3312 /* 3313 * we pass NULL for nrelocp to page_lookup_create() 3314 * so that it doesn't relocate. We relocate here 3315 * later only after we make sure we can lock all 3316 * pages in the range we handle and they are all 3317 * aligned. 3318 */ 3319 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3320 ASSERT(pp != NULL); 3321 ASSERT(!PP_ISFREE(pp)); 3322 ASSERT(pp->p_vnode == vp); 3323 ASSERT(pp->p_offset == off); 3324 if (pp == newpp) { 3325 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3326 page_sub(&pplist, pp); 3327 ASSERT(PAGE_EXCL(pp)); 3328 ASSERT(page_iolock_assert(pp)); 3329 page_list_concat(&io_pplist, &pp); 3330 off += PAGESIZE; 3331 continue; 3332 } 3333 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3334 pfn = page_pptonum(pp); 3335 pszc = pp->p_szc; 3336 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3337 IS_P2ALIGNED(pfn, pages)) { 3338 ASSERT(repl_pplist == NULL); 3339 ASSERT(done_pplist == NULL); 3340 ASSERT(pplist == *ppplist); 3341 page_unlock(pp); 3342 page_free_replacement_page(pplist); 3343 page_create_putback(pages); 3344 *ppplist = NULL; 3345 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3346 return (1); 3347 } 3348 if (pszc >= szc) { 3349 page_unlock(pp); 3350 segvn_faultvnmpss_align_err1++; 3351 goto out; 3352 } 3353 ppages = page_get_pagecnt(pszc); 3354 if (!IS_P2ALIGNED(pfn, ppages)) { 3355 ASSERT(pszc > 0); 3356 /* 3357 * sizing down to pszc won't help. 3358 */ 3359 page_unlock(pp); 3360 segvn_faultvnmpss_align_err2++; 3361 goto out; 3362 } 3363 pfn = page_pptonum(newpp); 3364 if (!IS_P2ALIGNED(pfn, ppages)) { 3365 ASSERT(pszc > 0); 3366 /* 3367 * sizing down to pszc won't help. 
3368 */ 3369 page_unlock(pp); 3370 segvn_faultvnmpss_align_err3++; 3371 goto out; 3372 } 3373 if (!PAGE_EXCL(pp)) { 3374 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3375 page_unlock(pp); 3376 *downsize = 1; 3377 *ret_pszc = pp->p_szc; 3378 goto out; 3379 } 3380 targpp = pp; 3381 if (io_pplist != NULL) { 3382 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3383 io_len = off - io_off; 3384 /* 3385 * Some file systems like NFS don't check EOF 3386 * conditions in VOP_PAGEIO(). Check it here 3387 * now that pages are locked SE_EXCL. Any file 3388 * truncation will wait until the pages are 3389 * unlocked so no need to worry that file will 3390 * be truncated after we check its size here. 3391 * XXX fix NFS to remove this check. 3392 */ 3393 va.va_mask = AT_SIZE; 3394 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred) != 0) { 3395 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3396 page_unlock(targpp); 3397 goto out; 3398 } 3399 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3400 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3401 *downsize = 1; 3402 *ret_pszc = 0; 3403 page_unlock(targpp); 3404 goto out; 3405 } 3406 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3407 B_READ, svd->cred); 3408 if (io_err) { 3409 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3410 page_unlock(targpp); 3411 if (io_err == EDEADLK) { 3412 segvn_vmpss_pageio_deadlk_err++; 3413 } 3414 goto out; 3415 } 3416 nios++; 3417 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3418 while (io_pplist != NULL) { 3419 pp = io_pplist; 3420 page_sub(&io_pplist, pp); 3421 ASSERT(page_iolock_assert(pp)); 3422 page_io_unlock(pp); 3423 pgidx = (pp->p_offset - start_off) >> 3424 PAGESHIFT; 3425 ASSERT(pgidx < pages); 3426 ppa[pgidx] = pp; 3427 page_list_concat(&done_pplist, &pp); 3428 } 3429 } 3430 pp = targpp; 3431 ASSERT(PAGE_EXCL(pp)); 3432 ASSERT(pp->p_szc <= pszc); 3433 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3434 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3435 page_unlock(pp); 3436 *downsize = 1; 3437 *ret_pszc = pp->p_szc; 3438 goto out; 3439 } 3440 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3441 /* 3442 * page szc chould have changed before the entire group was 3443 * locked. reread page szc. 
3444 */ 3445 pszc = pp->p_szc; 3446 ppages = page_get_pagecnt(pszc); 3447 3448 /* link just the roots */ 3449 page_list_concat(&targ_pplist, &pp); 3450 page_sub(&pplist, newpp); 3451 page_list_concat(&repl_pplist, &newpp); 3452 off += PAGESIZE; 3453 while (--ppages != 0) { 3454 newpp = pplist; 3455 page_sub(&pplist, newpp); 3456 off += PAGESIZE; 3457 } 3458 io_off = off; 3459 } 3460 if (io_pplist != NULL) { 3461 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3462 io_len = eoff - io_off; 3463 va.va_mask = AT_SIZE; 3464 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred) != 0) { 3465 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3466 goto out; 3467 } 3468 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3469 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3470 *downsize = 1; 3471 *ret_pszc = 0; 3472 goto out; 3473 } 3474 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3475 B_READ, svd->cred); 3476 if (io_err) { 3477 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3478 if (io_err == EDEADLK) { 3479 segvn_vmpss_pageio_deadlk_err++; 3480 } 3481 goto out; 3482 } 3483 nios++; 3484 while (io_pplist != NULL) { 3485 pp = io_pplist; 3486 page_sub(&io_pplist, pp); 3487 ASSERT(page_iolock_assert(pp)); 3488 page_io_unlock(pp); 3489 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3490 ASSERT(pgidx < pages); 3491 ppa[pgidx] = pp; 3492 } 3493 } 3494 /* 3495 * we're now bound to succeed or panic. 3496 * remove pages from done_pplist. it's not needed anymore. 3497 */ 3498 while (done_pplist != NULL) { 3499 pp = done_pplist; 3500 page_sub(&done_pplist, pp); 3501 } 3502 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3503 ASSERT(pplist == NULL); 3504 *ppplist = NULL; 3505 while (targ_pplist != NULL) { 3506 int ret; 3507 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3508 ASSERT(repl_pplist); 3509 pp = targ_pplist; 3510 page_sub(&targ_pplist, pp); 3511 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3512 newpp = repl_pplist; 3513 page_sub(&repl_pplist, newpp); 3514 #ifdef DEBUG 3515 pfn = page_pptonum(pp); 3516 pszc = pp->p_szc; 3517 ppages = page_get_pagecnt(pszc); 3518 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3519 pfn = page_pptonum(newpp); 3520 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3521 ASSERT(P2PHASE(pfn, pages) == pgidx); 3522 #endif 3523 nreloc = 0; 3524 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3525 if (ret != 0 || nreloc == 0) { 3526 panic("segvn_fill_vp_pages: " 3527 "page_relocate failed"); 3528 } 3529 pp = newpp; 3530 while (nreloc-- != 0) { 3531 ASSERT(PAGE_EXCL(pp)); 3532 ASSERT(pp->p_vnode == vp); 3533 ASSERT(pgidx == 3534 ((pp->p_offset - start_off) >> PAGESHIFT)); 3535 ppa[pgidx++] = pp; 3536 pp++; 3537 } 3538 } 3539 3540 if (svd->type == MAP_PRIVATE) { 3541 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3542 for (i = 0; i < pages; i++) { 3543 ASSERT(ppa[i] != NULL); 3544 ASSERT(PAGE_EXCL(ppa[i])); 3545 ASSERT(ppa[i]->p_vnode == vp); 3546 ASSERT(ppa[i]->p_offset == 3547 start_off + (i << PAGESHIFT)); 3548 page_downgrade(ppa[i]); 3549 } 3550 ppa[pages] = NULL; 3551 } else { 3552 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3553 /* 3554 * the caller will still call VOP_GETPAGE() for shared segments 3555 * to check FS write permissions. For private segments we map 3556 * file read only anyway. so no VOP_GETPAGE is needed. 
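 *
 * Note on the two success paths (descriptive only): the MAP_PRIVATE
 * path above returns with every page still locked shared and with
 * ppa[pages] == NULL as the terminator, while this MAP_SHARED path
 * unlocks the pages and leaves ppa[0] == NULL so the caller's
 * VOP_GETPAGE() pass repopulates the array.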
3557 */ 3558 for (i = 0; i < pages; i++) { 3559 ASSERT(ppa[i] != NULL); 3560 ASSERT(PAGE_EXCL(ppa[i])); 3561 ASSERT(ppa[i]->p_vnode == vp); 3562 ASSERT(ppa[i]->p_offset == 3563 start_off + (i << PAGESHIFT)); 3564 page_unlock(ppa[i]); 3565 } 3566 ppa[0] = NULL; 3567 } 3568 3569 return (1); 3570 out: 3571 /* 3572 * Do the cleanup. Unlock target pages we didn't relocate. They are 3573 * linked on targ_pplist by root pages. reassemble unused replacement 3574 * and io pages back to pplist. 3575 */ 3576 if (io_pplist != NULL) { 3577 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3578 pp = io_pplist; 3579 do { 3580 ASSERT(pp->p_vnode == vp); 3581 ASSERT(pp->p_offset == io_off); 3582 ASSERT(page_iolock_assert(pp)); 3583 page_io_unlock(pp); 3584 page_hashout(pp, NULL); 3585 io_off += PAGESIZE; 3586 } while ((pp = pp->p_next) != io_pplist); 3587 page_list_concat(&io_pplist, &pplist); 3588 pplist = io_pplist; 3589 } 3590 tmp_pplist = NULL; 3591 while (targ_pplist != NULL) { 3592 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3593 pp = targ_pplist; 3594 ASSERT(PAGE_EXCL(pp)); 3595 page_sub(&targ_pplist, pp); 3596 3597 pszc = pp->p_szc; 3598 ppages = page_get_pagecnt(pszc); 3599 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3600 3601 if (pszc != 0) { 3602 group_page_unlock(pp); 3603 } 3604 page_unlock(pp); 3605 3606 pp = repl_pplist; 3607 ASSERT(pp != NULL); 3608 ASSERT(PAGE_EXCL(pp)); 3609 ASSERT(pp->p_szc == szc); 3610 page_sub(&repl_pplist, pp); 3611 3612 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3613 3614 /* relink replacement page */ 3615 page_list_concat(&tmp_pplist, &pp); 3616 while (--ppages != 0) { 3617 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3618 pp++; 3619 ASSERT(PAGE_EXCL(pp)); 3620 ASSERT(pp->p_szc == szc); 3621 page_list_concat(&tmp_pplist, &pp); 3622 } 3623 } 3624 if (tmp_pplist != NULL) { 3625 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3626 page_list_concat(&tmp_pplist, &pplist); 3627 pplist = tmp_pplist; 3628 } 3629 /* 3630 * at this point all pages are either on done_pplist or 3631 * pplist. They can't be all on done_pplist otherwise 3632 * we'd've been done. 3633 */ 3634 ASSERT(pplist != NULL); 3635 if (nios != 0) { 3636 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3637 pp = pplist; 3638 do { 3639 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3640 ASSERT(pp->p_szc == szc); 3641 ASSERT(PAGE_EXCL(pp)); 3642 ASSERT(pp->p_vnode != vp); 3643 pp->p_szc = 0; 3644 } while ((pp = pp->p_next) != pplist); 3645 3646 pp = done_pplist; 3647 do { 3648 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3649 ASSERT(pp->p_szc == szc); 3650 ASSERT(PAGE_EXCL(pp)); 3651 ASSERT(pp->p_vnode == vp); 3652 pp->p_szc = 0; 3653 } while ((pp = pp->p_next) != done_pplist); 3654 3655 while (pplist != NULL) { 3656 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3657 pp = pplist; 3658 page_sub(&pplist, pp); 3659 page_free(pp, 0); 3660 } 3661 3662 while (done_pplist != NULL) { 3663 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3664 pp = done_pplist; 3665 page_sub(&done_pplist, pp); 3666 page_unlock(pp); 3667 } 3668 *ppplist = NULL; 3669 return (0); 3670 } 3671 ASSERT(pplist == *ppplist); 3672 if (io_err) { 3673 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3674 /* 3675 * don't downsize on io error. 3676 * see if vop_getpage succeeds. 3677 * pplist may still be used in this case 3678 * for relocations. 
3679 */ 3680 return (0); 3681 } 3682 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3683 page_free_replacement_page(pplist); 3684 page_create_putback(pages); 3685 *ppplist = NULL; 3686 return (0); 3687 } 3688 3689 int segvn_anypgsz = 0; 3690 3691 #define SEGVN_RESTORE_SOFTLOCK(type, pages) \ 3692 if ((type) == F_SOFTLOCK) { \ 3693 mutex_enter(&freemem_lock); \ 3694 availrmem += (pages); \ 3695 segvn_pages_locked -= (pages); \ 3696 svd->softlockcnt -= (pages); \ 3697 mutex_exit(&freemem_lock); \ 3698 } 3699 3700 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3701 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3702 if ((rw) == S_WRITE) { \ 3703 for (i = 0; i < (pages); i++) { \ 3704 ASSERT((ppa)[i]->p_vnode == \ 3705 (ppa)[0]->p_vnode); \ 3706 hat_setmod((ppa)[i]); \ 3707 } \ 3708 } else if ((rw) != S_OTHER && \ 3709 ((prot) & (vpprot) & PROT_WRITE)) { \ 3710 for (i = 0; i < (pages); i++) { \ 3711 ASSERT((ppa)[i]->p_vnode == \ 3712 (ppa)[0]->p_vnode); \ 3713 if (!hat_ismod((ppa)[i])) { \ 3714 prot &= ~PROT_WRITE; \ 3715 break; \ 3716 } \ 3717 } \ 3718 } \ 3719 } 3720 3721 #ifdef VM_STATS 3722 3723 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3724 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3725 3726 #else /* VM_STATS */ 3727 3728 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3729 3730 #endif 3731 3732 static faultcode_t 3733 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3734 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3735 caddr_t eaddr, int brkcow) 3736 { 3737 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3738 struct anon_map *amp = svd->amp; 3739 uchar_t segtype = svd->type; 3740 uint_t szc = seg->s_szc; 3741 size_t pgsz = page_get_pagesize(szc); 3742 size_t maxpgsz = pgsz; 3743 pgcnt_t pages = btop(pgsz); 3744 pgcnt_t maxpages = pages; 3745 size_t ppasize = (pages + 1) * sizeof (page_t *); 3746 caddr_t a = lpgaddr; 3747 caddr_t maxlpgeaddr = lpgeaddr; 3748 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3749 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3750 struct vpage *vpage = (svd->vpage != NULL) ? 3751 &svd->vpage[seg_page(seg, a)] : NULL; 3752 vnode_t *vp = svd->vp; 3753 page_t **ppa; 3754 uint_t pszc; 3755 size_t ppgsz; 3756 pgcnt_t ppages; 3757 faultcode_t err = 0; 3758 int ierr; 3759 int vop_size_err = 0; 3760 uint_t protchk, prot, vpprot; 3761 ulong_t i; 3762 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3763 anon_sync_obj_t an_cookie; 3764 enum seg_rw arw; 3765 int alloc_failed = 0; 3766 int adjszc_chk; 3767 struct vattr va; 3768 int xhat = 0; 3769 page_t *pplist; 3770 pfn_t pfn; 3771 int physcontig; 3772 int upgrdfail; 3773 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3774 int tron = (svd->tr_state == SEGVN_TR_ON); 3775 3776 ASSERT(szc != 0); 3777 ASSERT(vp != NULL); 3778 ASSERT(brkcow == 0 || amp != NULL); 3779 ASSERT(tron == 0 || amp != NULL); 3780 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3781 ASSERT(!(svd->flags & MAP_NORESERVE)); 3782 ASSERT(type != F_SOFTUNLOCK); 3783 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3784 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3785 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3786 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3787 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3788 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3789 3790 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3791 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3792 3793 if (svd->flags & MAP_TEXT) { 3794 hat_flag |= HAT_LOAD_TEXT; 3795 } 3796 3797 if (svd->pageprot) { 3798 switch (rw) { 3799 case S_READ: 3800 protchk = PROT_READ; 3801 break; 3802 case S_WRITE: 3803 protchk = PROT_WRITE; 3804 break; 3805 case S_EXEC: 3806 protchk = PROT_EXEC; 3807 break; 3808 case S_OTHER: 3809 default: 3810 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3811 break; 3812 } 3813 } else { 3814 prot = svd->prot; 3815 /* caller has already done segment level protection check. */ 3816 } 3817 3818 if (seg->s_as->a_hat != hat) { 3819 xhat = 1; 3820 } 3821 3822 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3823 SEGVN_VMSTAT_FLTVNPAGES(2); 3824 arw = S_READ; 3825 } else { 3826 arw = rw; 3827 } 3828 3829 ppa = kmem_alloc(ppasize, KM_SLEEP); 3830 3831 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3832 3833 for (;;) { 3834 adjszc_chk = 0; 3835 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3836 if (adjszc_chk) { 3837 while (szc < seg->s_szc) { 3838 uintptr_t e; 3839 uint_t tszc; 3840 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3841 seg->s_szc; 3842 ppgsz = page_get_pagesize(tszc); 3843 if (!IS_P2ALIGNED(a, ppgsz) || 3844 ((alloc_failed >> tszc) & 3845 0x1)) { 3846 break; 3847 } 3848 SEGVN_VMSTAT_FLTVNPAGES(4); 3849 szc = tszc; 3850 pgsz = ppgsz; 3851 pages = btop(pgsz); 3852 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3853 lpgeaddr = (caddr_t)e; 3854 } 3855 } 3856 3857 again: 3858 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3859 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3860 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 3861 anon_array_enter(amp, aindx, &an_cookie); 3862 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3863 SEGVN_VMSTAT_FLTVNPAGES(5); 3864 ASSERT(anon_pages(amp->ahp, aindx, 3865 maxpages) == maxpages); 3866 anon_array_exit(&an_cookie); 3867 ANON_LOCK_EXIT(&->a_rwlock); 3868 err = segvn_fault_anonpages(hat, seg, 3869 a, a + maxpgsz, type, rw, 3870 MAX(a, addr), 3871 MIN(a + maxpgsz, eaddr), brkcow); 3872 if (err != 0) { 3873 SEGVN_VMSTAT_FLTVNPAGES(6); 3874 goto out; 3875 } 3876 if (szc < seg->s_szc) { 3877 szc = seg->s_szc; 3878 pgsz = maxpgsz; 3879 pages = maxpages; 3880 lpgeaddr = maxlpgeaddr; 3881 } 3882 goto next; 3883 } else { 3884 ASSERT(anon_pages(amp->ahp, aindx, 3885 maxpages) == 0); 3886 SEGVN_VMSTAT_FLTVNPAGES(7); 3887 anon_array_exit(&an_cookie); 3888 ANON_LOCK_EXIT(&->a_rwlock); 3889 } 3890 } 3891 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3892 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3893 3894 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3895 ASSERT(vpage != NULL); 3896 prot = VPP_PROT(vpage); 3897 ASSERT(sameprot(seg, a, maxpgsz)); 3898 if ((prot & protchk) == 0) { 3899 SEGVN_VMSTAT_FLTVNPAGES(8); 3900 err = FC_PROT; 3901 goto out; 3902 } 3903 } 3904 if (type == F_SOFTLOCK) { 3905 mutex_enter(&freemem_lock); 3906 if (availrmem < tune.t_minarmem + pages) { 3907 mutex_exit(&freemem_lock); 3908 err = FC_MAKE_ERR(ENOMEM); 3909 goto out; 3910 } else { 3911 availrmem -= pages; 3912 segvn_pages_locked += pages; 3913 svd->softlockcnt += pages; 3914 } 3915 mutex_exit(&freemem_lock); 3916 } 3917 3918 pplist = NULL; 3919 physcontig = 0; 3920 ppa[0] = NULL; 3921 if (!brkcow && !tron && szc && 3922 !page_exists_physcontig(vp, off, szc, 3923 segtype == MAP_PRIVATE ? 
ppa : NULL)) { 3924 SEGVN_VMSTAT_FLTVNPAGES(9); 3925 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3926 szc, 0, 0) && type != F_SOFTLOCK) { 3927 SEGVN_VMSTAT_FLTVNPAGES(10); 3928 pszc = 0; 3929 ierr = -1; 3930 alloc_failed |= (1 << szc); 3931 break; 3932 } 3933 if (pplist != NULL && 3934 vp->v_mpssdata == SEGVN_PAGEIO) { 3935 int downsize; 3936 SEGVN_VMSTAT_FLTVNPAGES(11); 3937 physcontig = segvn_fill_vp_pages(svd, 3938 vp, off, szc, ppa, &pplist, 3939 &pszc, &downsize); 3940 ASSERT(!physcontig || pplist == NULL); 3941 if (!physcontig && downsize && 3942 type != F_SOFTLOCK) { 3943 ASSERT(pplist == NULL); 3944 SEGVN_VMSTAT_FLTVNPAGES(12); 3945 ierr = -1; 3946 break; 3947 } 3948 ASSERT(!physcontig || 3949 segtype == MAP_PRIVATE || 3950 ppa[0] == NULL); 3951 if (physcontig && ppa[0] == NULL) { 3952 physcontig = 0; 3953 } 3954 } 3955 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 3956 SEGVN_VMSTAT_FLTVNPAGES(13); 3957 ASSERT(segtype == MAP_PRIVATE); 3958 physcontig = 1; 3959 } 3960 3961 if (!physcontig) { 3962 SEGVN_VMSTAT_FLTVNPAGES(14); 3963 ppa[0] = NULL; 3964 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 3965 &vpprot, ppa, pgsz, seg, a, arw, 3966 svd->cred); 3967 #ifdef DEBUG 3968 if (ierr == 0) { 3969 for (i = 0; i < pages; i++) { 3970 ASSERT(PAGE_LOCKED(ppa[i])); 3971 ASSERT(!PP_ISFREE(ppa[i])); 3972 ASSERT(ppa[i]->p_vnode == vp); 3973 ASSERT(ppa[i]->p_offset == 3974 off + (i << PAGESHIFT)); 3975 } 3976 } 3977 #endif /* DEBUG */ 3978 if (segtype == MAP_PRIVATE) { 3979 SEGVN_VMSTAT_FLTVNPAGES(15); 3980 vpprot &= ~PROT_WRITE; 3981 } 3982 } else { 3983 ASSERT(segtype == MAP_PRIVATE); 3984 SEGVN_VMSTAT_FLTVNPAGES(16); 3985 vpprot = PROT_ALL & ~PROT_WRITE; 3986 ierr = 0; 3987 } 3988 3989 if (ierr != 0) { 3990 SEGVN_VMSTAT_FLTVNPAGES(17); 3991 if (pplist != NULL) { 3992 SEGVN_VMSTAT_FLTVNPAGES(18); 3993 page_free_replacement_page(pplist); 3994 page_create_putback(pages); 3995 } 3996 SEGVN_RESTORE_SOFTLOCK(type, pages); 3997 if (a + pgsz <= eaddr) { 3998 SEGVN_VMSTAT_FLTVNPAGES(19); 3999 err = FC_MAKE_ERR(ierr); 4000 goto out; 4001 } 4002 va.va_mask = AT_SIZE; 4003 if (VOP_GETATTR(vp, &va, 0, svd->cred) != 0) { 4004 SEGVN_VMSTAT_FLTVNPAGES(20); 4005 err = FC_MAKE_ERR(EIO); 4006 goto out; 4007 } 4008 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4009 SEGVN_VMSTAT_FLTVNPAGES(21); 4010 err = FC_MAKE_ERR(ierr); 4011 goto out; 4012 } 4013 if (btopr(va.va_size) < 4014 btopr(off + (eaddr - a))) { 4015 SEGVN_VMSTAT_FLTVNPAGES(22); 4016 err = FC_MAKE_ERR(ierr); 4017 goto out; 4018 } 4019 if (brkcow || tron || type == F_SOFTLOCK) { 4020 /* can't reduce map area */ 4021 SEGVN_VMSTAT_FLTVNPAGES(23); 4022 vop_size_err = 1; 4023 goto out; 4024 } 4025 SEGVN_VMSTAT_FLTVNPAGES(24); 4026 ASSERT(szc != 0); 4027 pszc = 0; 4028 ierr = -1; 4029 break; 4030 } 4031 4032 if (amp != NULL) { 4033 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4034 anon_array_enter(amp, aindx, &an_cookie); 4035 } 4036 if (amp != NULL && 4037 anon_get_ptr(amp->ahp, aindx) != NULL) { 4038 ulong_t taindx = P2ALIGN(aindx, maxpages); 4039 4040 SEGVN_VMSTAT_FLTVNPAGES(25); 4041 ASSERT(anon_pages(amp->ahp, taindx, 4042 maxpages) == maxpages); 4043 for (i = 0; i < pages; i++) { 4044 page_unlock(ppa[i]); 4045 } 4046 anon_array_exit(&an_cookie); 4047 ANON_LOCK_EXIT(&->a_rwlock); 4048 if (pplist != NULL) { 4049 page_free_replacement_page(pplist); 4050 page_create_putback(pages); 4051 } 4052 SEGVN_RESTORE_SOFTLOCK(type, pages); 4053 if (szc < seg->s_szc) { 4054 SEGVN_VMSTAT_FLTVNPAGES(26); 4055 /* 4056 * For private segments SOFTLOCK 4057 * either 
always breaks cow (any rw 4058 * type except S_READ_NOCOW) or 4059 * address space is locked as writer 4060 * (S_READ_NOCOW case) and anon slots 4061 * can't show up on second check. 4062 * Therefore if we are here for 4063 * SOFTLOCK case it must be a cow 4064 * break but cow break never reduces 4065 * szc. text replication (tron) in 4066 * this case works as cow break. 4067 * Thus the assert below. 4068 */ 4069 ASSERT(!brkcow && !tron && 4070 type != F_SOFTLOCK); 4071 pszc = seg->s_szc; 4072 ierr = -2; 4073 break; 4074 } 4075 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4076 goto again; 4077 } 4078 #ifdef DEBUG 4079 if (amp != NULL) { 4080 ulong_t taindx = P2ALIGN(aindx, maxpages); 4081 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4082 } 4083 #endif /* DEBUG */ 4084 4085 if (brkcow || tron) { 4086 ASSERT(amp != NULL); 4087 ASSERT(pplist == NULL); 4088 ASSERT(szc == seg->s_szc); 4089 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4090 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4091 SEGVN_VMSTAT_FLTVNPAGES(27); 4092 ierr = anon_map_privatepages(amp, aindx, szc, 4093 seg, a, prot, ppa, vpage, segvn_anypgsz, 4094 tron ? PG_LOCAL : 0, svd->cred); 4095 if (ierr != 0) { 4096 SEGVN_VMSTAT_FLTVNPAGES(28); 4097 anon_array_exit(&an_cookie); 4098 ANON_LOCK_EXIT(&->a_rwlock); 4099 SEGVN_RESTORE_SOFTLOCK(type, pages); 4100 err = FC_MAKE_ERR(ierr); 4101 goto out; 4102 } 4103 4104 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4105 /* 4106 * p_szc can't be changed for locked 4107 * swapfs pages. 4108 */ 4109 ASSERT(svd->rcookie == 4110 HAT_INVALID_REGION_COOKIE); 4111 hat_memload_array(hat, a, pgsz, ppa, prot, 4112 hat_flag); 4113 4114 if (!(hat_flag & HAT_LOAD_LOCK)) { 4115 SEGVN_VMSTAT_FLTVNPAGES(29); 4116 for (i = 0; i < pages; i++) { 4117 page_unlock(ppa[i]); 4118 } 4119 } 4120 anon_array_exit(&an_cookie); 4121 ANON_LOCK_EXIT(&->a_rwlock); 4122 goto next; 4123 } 4124 4125 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4126 (!svd->pageprot && svd->prot == (prot & vpprot))); 4127 4128 pfn = page_pptonum(ppa[0]); 4129 /* 4130 * hat_page_demote() needs an SE_EXCL lock on one of 4131 * constituent page_t's and it decreases root's p_szc 4132 * last. This means if root's p_szc is equal szc and 4133 * all its constituent pages are locked 4134 * hat_page_demote() that could have changed p_szc to 4135 * szc is already done and no new have page_demote() 4136 * can start for this large page. 4137 */ 4138 4139 /* 4140 * we need to make sure same mapping size is used for 4141 * the same address range if there's a possibility the 4142 * adddress is already mapped because hat layer panics 4143 * when translation is loaded for the range already 4144 * mapped with a different page size. We achieve it 4145 * by always using largest page size possible subject 4146 * to the constraints of page size, segment page size 4147 * and page alignment. Since mappings are invalidated 4148 * when those constraints change and make it 4149 * impossible to use previously used mapping size no 4150 * mapping size conflicts should happen. 
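 *
 * Note the role of alloc_failed in this: when an allocation at
 * some szc fails below we set (1 << szc) in alloc_failed, and
 * the resize-up check at the top of the loop stops before any
 * size whose bit is set, so the incremental size-up never
 * returns to a size this fault already failed to obtain.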
4151 */ 4152 4153 chkszc: 4154 if ((pszc = ppa[0]->p_szc) == szc && 4155 IS_P2ALIGNED(pfn, pages)) { 4156 4157 SEGVN_VMSTAT_FLTVNPAGES(30); 4158 #ifdef DEBUG 4159 for (i = 0; i < pages; i++) { 4160 ASSERT(PAGE_LOCKED(ppa[i])); 4161 ASSERT(!PP_ISFREE(ppa[i])); 4162 ASSERT(page_pptonum(ppa[i]) == 4163 pfn + i); 4164 ASSERT(ppa[i]->p_szc == szc); 4165 ASSERT(ppa[i]->p_vnode == vp); 4166 ASSERT(ppa[i]->p_offset == 4167 off + (i << PAGESHIFT)); 4168 } 4169 #endif /* DEBUG */ 4170 /* 4171 * All pages are of szc we need and they are 4172 * all locked so they can't change szc. load 4173 * translations. 4174 * 4175 * if page got promoted since last check 4176 * we don't need pplist. 4177 */ 4178 if (pplist != NULL) { 4179 page_free_replacement_page(pplist); 4180 page_create_putback(pages); 4181 } 4182 if (PP_ISMIGRATE(ppa[0])) { 4183 page_migrate(seg, a, ppa, pages); 4184 } 4185 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4186 prot, vpprot); 4187 if (!xhat) { 4188 hat_memload_array_region(hat, a, pgsz, 4189 ppa, prot & vpprot, hat_flag, 4190 svd->rcookie); 4191 } else { 4192 /* 4193 * avoid large xhat mappings to FS 4194 * pages so that hat_page_demote() 4195 * doesn't need to check for xhat 4196 * large mappings. 4197 * Don't use regions with xhats. 4198 */ 4199 for (i = 0; i < pages; i++) { 4200 hat_memload(hat, 4201 a + (i << PAGESHIFT), 4202 ppa[i], prot & vpprot, 4203 hat_flag); 4204 } 4205 } 4206 4207 if (!(hat_flag & HAT_LOAD_LOCK)) { 4208 for (i = 0; i < pages; i++) { 4209 page_unlock(ppa[i]); 4210 } 4211 } 4212 if (amp != NULL) { 4213 anon_array_exit(&an_cookie); 4214 ANON_LOCK_EXIT(&->a_rwlock); 4215 } 4216 goto next; 4217 } 4218 4219 /* 4220 * See if upsize is possible. 4221 */ 4222 if (pszc > szc && szc < seg->s_szc && 4223 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4224 pgcnt_t aphase; 4225 uint_t pszc1 = MIN(pszc, seg->s_szc); 4226 ppgsz = page_get_pagesize(pszc1); 4227 ppages = btop(ppgsz); 4228 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4229 4230 ASSERT(type != F_SOFTLOCK); 4231 4232 SEGVN_VMSTAT_FLTVNPAGES(31); 4233 if (aphase != P2PHASE(pfn, ppages)) { 4234 segvn_faultvnmpss_align_err4++; 4235 } else { 4236 SEGVN_VMSTAT_FLTVNPAGES(32); 4237 if (pplist != NULL) { 4238 page_t *pl = pplist; 4239 page_free_replacement_page(pl); 4240 page_create_putback(pages); 4241 } 4242 for (i = 0; i < pages; i++) { 4243 page_unlock(ppa[i]); 4244 } 4245 if (amp != NULL) { 4246 anon_array_exit(&an_cookie); 4247 ANON_LOCK_EXIT(&->a_rwlock); 4248 } 4249 pszc = pszc1; 4250 ierr = -2; 4251 break; 4252 } 4253 } 4254 4255 /* 4256 * check if we should use smallest mapping size. 4257 */ 4258 upgrdfail = 0; 4259 if (szc == 0 || xhat || 4260 (pszc >= szc && 4261 !IS_P2ALIGNED(pfn, pages)) || 4262 (pszc < szc && 4263 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4264 &pszc))) { 4265 4266 if (upgrdfail && type != F_SOFTLOCK) { 4267 /* 4268 * segvn_full_szcpages failed to lock 4269 * all pages EXCL. Size down. 
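 * This size-down path is only taken for non-SOFTLOCK faults;
 * for F_SOFTLOCK (whose region is a single large page, as
 * asserted at entry) we fall through and load translations
 * below without resizing.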
4270 */ 4271 ASSERT(pszc < szc); 4272 4273 SEGVN_VMSTAT_FLTVNPAGES(33); 4274 4275 if (pplist != NULL) { 4276 page_t *pl = pplist; 4277 page_free_replacement_page(pl); 4278 page_create_putback(pages); 4279 } 4280 4281 for (i = 0; i < pages; i++) { 4282 page_unlock(ppa[i]); 4283 } 4284 if (amp != NULL) { 4285 anon_array_exit(&an_cookie); 4286 ANON_LOCK_EXIT(&->a_rwlock); 4287 } 4288 ierr = -1; 4289 break; 4290 } 4291 if (szc != 0 && !xhat && !upgrdfail) { 4292 segvn_faultvnmpss_align_err5++; 4293 } 4294 SEGVN_VMSTAT_FLTVNPAGES(34); 4295 if (pplist != NULL) { 4296 page_free_replacement_page(pplist); 4297 page_create_putback(pages); 4298 } 4299 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4300 prot, vpprot); 4301 if (upgrdfail && segvn_anypgsz_vnode) { 4302 /* SOFTLOCK case */ 4303 hat_memload_array_region(hat, a, pgsz, 4304 ppa, prot & vpprot, hat_flag, 4305 svd->rcookie); 4306 } else { 4307 for (i = 0; i < pages; i++) { 4308 hat_memload_region(hat, 4309 a + (i << PAGESHIFT), 4310 ppa[i], prot & vpprot, 4311 hat_flag, svd->rcookie); 4312 } 4313 } 4314 if (!(hat_flag & HAT_LOAD_LOCK)) { 4315 for (i = 0; i < pages; i++) { 4316 page_unlock(ppa[i]); 4317 } 4318 } 4319 if (amp != NULL) { 4320 anon_array_exit(&an_cookie); 4321 ANON_LOCK_EXIT(&->a_rwlock); 4322 } 4323 goto next; 4324 } 4325 4326 if (pszc == szc) { 4327 /* 4328 * segvn_full_szcpages() upgraded pages szc. 4329 */ 4330 ASSERT(pszc == ppa[0]->p_szc); 4331 ASSERT(IS_P2ALIGNED(pfn, pages)); 4332 goto chkszc; 4333 } 4334 4335 if (pszc > szc) { 4336 kmutex_t *szcmtx; 4337 SEGVN_VMSTAT_FLTVNPAGES(35); 4338 /* 4339 * p_szc of ppa[0] can change since we haven't 4340 * locked all constituent pages. Call 4341 * page_lock_szc() to prevent szc changes. 4342 * This should be a rare case that happens when 4343 * multiple segments use a different page size 4344 * to map the same file offsets. 4345 */ 4346 szcmtx = page_szc_lock(ppa[0]); 4347 pszc = ppa[0]->p_szc; 4348 ASSERT(szcmtx != NULL || pszc == 0); 4349 ASSERT(ppa[0]->p_szc <= pszc); 4350 if (pszc <= szc) { 4351 SEGVN_VMSTAT_FLTVNPAGES(36); 4352 if (szcmtx != NULL) { 4353 mutex_exit(szcmtx); 4354 } 4355 goto chkszc; 4356 } 4357 if (pplist != NULL) { 4358 /* 4359 * page got promoted since last check. 4360 * we don't need preaalocated large 4361 * page. 4362 */ 4363 SEGVN_VMSTAT_FLTVNPAGES(37); 4364 page_free_replacement_page(pplist); 4365 page_create_putback(pages); 4366 } 4367 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4368 prot, vpprot); 4369 hat_memload_array_region(hat, a, pgsz, ppa, 4370 prot & vpprot, hat_flag, svd->rcookie); 4371 mutex_exit(szcmtx); 4372 if (!(hat_flag & HAT_LOAD_LOCK)) { 4373 for (i = 0; i < pages; i++) { 4374 page_unlock(ppa[i]); 4375 } 4376 } 4377 if (amp != NULL) { 4378 anon_array_exit(&an_cookie); 4379 ANON_LOCK_EXIT(&->a_rwlock); 4380 } 4381 goto next; 4382 } 4383 4384 /* 4385 * if page got demoted since last check 4386 * we could have not allocated larger page. 4387 * allocate now. 
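 * If that allocation fails as well and this is not a SOFTLOCK
 * fault, the failure is recorded in alloc_failed and we break
 * out to retry with a smaller page size; F_SOFTLOCK cannot
 * shrink the locked region, so it proceeds with the existing
 * pages instead.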
4388 */ 4389 if (pplist == NULL && 4390 page_alloc_pages(vp, seg, a, &pplist, NULL, 4391 szc, 0, 0) && type != F_SOFTLOCK) { 4392 SEGVN_VMSTAT_FLTVNPAGES(38); 4393 for (i = 0; i < pages; i++) { 4394 page_unlock(ppa[i]); 4395 } 4396 if (amp != NULL) { 4397 anon_array_exit(&an_cookie); 4398 ANON_LOCK_EXIT(&->a_rwlock); 4399 } 4400 ierr = -1; 4401 alloc_failed |= (1 << szc); 4402 break; 4403 } 4404 4405 SEGVN_VMSTAT_FLTVNPAGES(39); 4406 4407 if (pplist != NULL) { 4408 segvn_relocate_pages(ppa, pplist); 4409 #ifdef DEBUG 4410 } else { 4411 ASSERT(type == F_SOFTLOCK); 4412 SEGVN_VMSTAT_FLTVNPAGES(40); 4413 #endif /* DEBUG */ 4414 } 4415 4416 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4417 4418 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4419 ASSERT(type == F_SOFTLOCK); 4420 for (i = 0; i < pages; i++) { 4421 ASSERT(ppa[i]->p_szc < szc); 4422 hat_memload_region(hat, 4423 a + (i << PAGESHIFT), 4424 ppa[i], prot & vpprot, hat_flag, 4425 svd->rcookie); 4426 } 4427 } else { 4428 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4429 hat_memload_array_region(hat, a, pgsz, ppa, 4430 prot & vpprot, hat_flag, svd->rcookie); 4431 } 4432 if (!(hat_flag & HAT_LOAD_LOCK)) { 4433 for (i = 0; i < pages; i++) { 4434 ASSERT(PAGE_SHARED(ppa[i])); 4435 page_unlock(ppa[i]); 4436 } 4437 } 4438 if (amp != NULL) { 4439 anon_array_exit(&an_cookie); 4440 ANON_LOCK_EXIT(&->a_rwlock); 4441 } 4442 4443 next: 4444 if (vpage != NULL) { 4445 vpage += pages; 4446 } 4447 adjszc_chk = 1; 4448 } 4449 if (a == lpgeaddr) 4450 break; 4451 ASSERT(a < lpgeaddr); 4452 4453 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4454 4455 /* 4456 * ierr == -1 means we failed to map with a large page. 4457 * (either due to allocation/relocation failures or 4458 * misalignment with other mappings to this file. 4459 * 4460 * ierr == -2 means some other thread allocated a large page 4461 * after we gave up tp map with a large page. retry with 4462 * larger mapping. 4463 */ 4464 ASSERT(ierr == -1 || ierr == -2); 4465 ASSERT(ierr == -2 || szc != 0); 4466 ASSERT(ierr == -1 || szc < seg->s_szc); 4467 if (ierr == -2) { 4468 SEGVN_VMSTAT_FLTVNPAGES(41); 4469 ASSERT(pszc > szc && pszc <= seg->s_szc); 4470 szc = pszc; 4471 } else if (segvn_anypgsz_vnode) { 4472 SEGVN_VMSTAT_FLTVNPAGES(42); 4473 szc--; 4474 } else { 4475 SEGVN_VMSTAT_FLTVNPAGES(43); 4476 ASSERT(pszc < szc); 4477 /* 4478 * other process created pszc large page. 4479 * but we still have to drop to 0 szc. 4480 */ 4481 szc = 0; 4482 } 4483 4484 pgsz = page_get_pagesize(szc); 4485 pages = btop(pgsz); 4486 if (ierr == -2) { 4487 /* 4488 * Size up case. Note lpgaddr may only be needed for 4489 * softlock case so we don't adjust it here. 4490 */ 4491 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4492 ASSERT(a >= lpgaddr); 4493 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4494 off = svd->offset + (uintptr_t)(a - seg->s_base); 4495 aindx = svd->anon_index + seg_page(seg, a); 4496 vpage = (svd->vpage != NULL) ? 4497 &svd->vpage[seg_page(seg, a)] : NULL; 4498 } else { 4499 /* 4500 * Size down case. Note lpgaddr may only be needed for 4501 * softlock case so we don't adjust it here. 4502 */ 4503 ASSERT(IS_P2ALIGNED(a, pgsz)); 4504 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4505 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4506 ASSERT(a < lpgeaddr); 4507 if (a < addr) { 4508 SEGVN_VMSTAT_FLTVNPAGES(44); 4509 /* 4510 * The beginning of the large page region can 4511 * be pulled to the right to make a smaller 4512 * region. We haven't yet faulted a single 4513 * page. 
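 * (In that case a is simply realigned down from addr to the
 * new, smaller pgsz, and off, aindx and vpage are recomputed
 * to match the new starting address.)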
4514 */ 4515 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4516 ASSERT(a >= lpgaddr); 4517 off = svd->offset + 4518 (uintptr_t)(a - seg->s_base); 4519 aindx = svd->anon_index + seg_page(seg, a); 4520 vpage = (svd->vpage != NULL) ? 4521 &svd->vpage[seg_page(seg, a)] : NULL; 4522 } 4523 } 4524 } 4525 out: 4526 kmem_free(ppa, ppasize); 4527 if (!err && !vop_size_err) { 4528 SEGVN_VMSTAT_FLTVNPAGES(45); 4529 return (0); 4530 } 4531 if (type == F_SOFTLOCK && a > lpgaddr) { 4532 SEGVN_VMSTAT_FLTVNPAGES(46); 4533 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4534 } 4535 if (!vop_size_err) { 4536 SEGVN_VMSTAT_FLTVNPAGES(47); 4537 return (err); 4538 } 4539 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4540 /* 4541 * Large page end is mapped beyond the end of file and it's a cow 4542 * fault (can be a text replication induced cow) or softlock so we can't 4543 * reduce the map area. For now just demote the segment. This should 4544 * really only happen if the end of the file changed after the mapping 4545 * was established since when large page segments are created we make 4546 * sure they don't extend beyond the end of the file. 4547 */ 4548 SEGVN_VMSTAT_FLTVNPAGES(48); 4549 4550 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4551 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4552 err = 0; 4553 if (seg->s_szc != 0) { 4554 segvn_fltvnpages_clrszc_cnt++; 4555 ASSERT(svd->softlockcnt == 0); 4556 err = segvn_clrszc(seg); 4557 if (err != 0) { 4558 segvn_fltvnpages_clrszc_err++; 4559 } 4560 } 4561 ASSERT(err || seg->s_szc == 0); 4562 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4563 /* segvn_fault will do its job as if szc had been zero to begin with */ 4564 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4565 } 4566 4567 /* 4568 * This routine will attempt to fault in one large page. 4569 * it will use smaller pages if that fails. 4570 * It should only be called for pure anonymous segments. 4571 */ 4572 static faultcode_t 4573 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4574 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4575 caddr_t eaddr, int brkcow) 4576 { 4577 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4578 struct anon_map *amp = svd->amp; 4579 uchar_t segtype = svd->type; 4580 uint_t szc = seg->s_szc; 4581 size_t pgsz = page_get_pagesize(szc); 4582 size_t maxpgsz = pgsz; 4583 pgcnt_t pages = btop(pgsz); 4584 size_t ppasize = pages * sizeof (page_t *); 4585 caddr_t a = lpgaddr; 4586 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4587 struct vpage *vpage = (svd->vpage != NULL) ? 4588 &svd->vpage[seg_page(seg, a)] : NULL; 4589 page_t **ppa; 4590 uint_t ppa_szc; 4591 faultcode_t err; 4592 int ierr; 4593 uint_t protchk, prot, vpprot; 4594 ulong_t i; 4595 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4596 anon_sync_obj_t cookie; 4597 int first = 1; 4598 int adjszc_chk; 4599 int purged = 0; 4600 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4601 4602 ASSERT(szc != 0); 4603 ASSERT(amp != NULL); 4604 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4605 ASSERT(!(svd->flags & MAP_NORESERVE)); 4606 ASSERT(type != F_SOFTUNLOCK); 4607 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4608 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4609 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4610 4611 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4612 4613 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4614 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4615 4616 if (svd->flags & MAP_TEXT) { 4617 hat_flag |= HAT_LOAD_TEXT; 4618 } 4619 4620 if (svd->pageprot) { 4621 switch (rw) { 4622 case S_READ: 4623 protchk = PROT_READ; 4624 break; 4625 case S_WRITE: 4626 protchk = PROT_WRITE; 4627 break; 4628 case S_EXEC: 4629 protchk = PROT_EXEC; 4630 break; 4631 case S_OTHER: 4632 default: 4633 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4634 break; 4635 } 4636 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4637 } else { 4638 prot = svd->prot; 4639 /* caller has already done segment level protection check. */ 4640 } 4641 4642 ppa = kmem_alloc(ppasize, KM_SLEEP); 4643 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4644 for (;;) { 4645 adjszc_chk = 0; 4646 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4647 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4648 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4649 ASSERT(vpage != NULL); 4650 prot = VPP_PROT(vpage); 4651 ASSERT(sameprot(seg, a, maxpgsz)); 4652 if ((prot & protchk) == 0) { 4653 err = FC_PROT; 4654 goto error; 4655 } 4656 } 4657 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4658 pgsz < maxpgsz) { 4659 ASSERT(a > lpgaddr); 4660 szc = seg->s_szc; 4661 pgsz = maxpgsz; 4662 pages = btop(pgsz); 4663 ASSERT(IS_P2ALIGNED(aindx, pages)); 4664 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4665 pgsz); 4666 } 4667 if (type == F_SOFTLOCK && svd->vp != NULL) { 4668 mutex_enter(&freemem_lock); 4669 if (availrmem < tune.t_minarmem + pages) { 4670 mutex_exit(&freemem_lock); 4671 err = FC_MAKE_ERR(ENOMEM); 4672 goto error; 4673 } else { 4674 availrmem -= pages; 4675 segvn_pages_locked += pages; 4676 svd->softlockcnt += pages; 4677 } 4678 mutex_exit(&freemem_lock); 4679 } 4680 anon_array_enter(amp, aindx, &cookie); 4681 ppa_szc = (uint_t)-1; 4682 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4683 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4684 segvn_anypgsz, pgflags, svd->cred); 4685 if (ierr != 0) { 4686 anon_array_exit(&cookie); 4687 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4688 if (type == F_SOFTLOCK && svd->vp != NULL) { 4689 VM_STAT_ADD(segvnvmstats.fltanpages[5]); 4690 mutex_enter(&freemem_lock); 4691 availrmem += pages; 4692 segvn_pages_locked -= pages; 4693 svd->softlockcnt -= pages; 4694 mutex_exit(&freemem_lock); 4695 } 4696 if (ierr > 0) { 4697 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4698 err = FC_MAKE_ERR(ierr); 4699 goto error; 4700 } 4701 break; 4702 } 4703 4704 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4705 4706 ASSERT(segtype == MAP_SHARED || 4707 ppa[0]->p_szc <= szc); 4708 ASSERT(segtype == MAP_PRIVATE || 4709 ppa[0]->p_szc >= szc); 4710 4711 /* 4712 * Handle pages that have been marked for migration 4713 */ 4714 if (lgrp_optimizations()) 4715 page_migrate(seg, a, ppa, pages); 4716 4717 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4718 if (type == F_SOFTLOCK && svd->vp == NULL) { 4719 /* 4720 * If all pages in ppa array belong to the same 4721 * large page call segvn_slock_anonpages() 4722 * just for ppa[0]. 
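 * The loop below either stops after ppa[0] (when its p_szc
 * shows it already covers the whole large page) or locks every
 * constituent page; if any segvn_slock_anonpages() call fails,
 * all earlier locks are undone and the fault fails with ENOMEM.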
4723 */ 4724 for (i = 0; i < pages; i++) { 4725 if (!segvn_slock_anonpages(ppa[i], 4726 i == 0 && first)) { 4727 ulong_t j; 4728 for (j = 0; j < i; j++) { 4729 segvn_sunlock_anonpages( 4730 ppa[j], 4731 j == 0 && 4732 first); 4733 page_unlock(ppa[j]); 4734 } 4735 for (j = i; j < pages; j++) { 4736 page_unlock(ppa[j]); 4737 } 4738 anon_array_exit(&cookie); 4739 err = FC_MAKE_ERR(ENOMEM); 4740 goto error; 4741 } 4742 if (i == 0 && ppa[0]->p_szc >= szc) { 4743 ASSERT(!(page_pptonum(ppa[0]) & 4744 (pages - 1))); 4745 break; 4746 } 4747 } 4748 first = 0; 4749 mutex_enter(&freemem_lock); 4750 svd->softlockcnt += pages; 4751 segvn_pages_locked += pages; 4752 mutex_exit(&freemem_lock); 4753 } 4754 4755 if (segtype == MAP_SHARED) { 4756 vpprot |= PROT_WRITE; 4757 } 4758 4759 hat_memload_array(hat, a, pgsz, ppa, 4760 prot & vpprot, hat_flag); 4761 4762 if (hat_flag & HAT_LOAD_LOCK) { 4763 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4764 } else { 4765 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4766 for (i = 0; i < pages; i++) 4767 page_unlock(ppa[i]); 4768 } 4769 if (vpage != NULL) 4770 vpage += pages; 4771 4772 anon_array_exit(&cookie); 4773 adjszc_chk = 1; 4774 } 4775 if (a == lpgeaddr) 4776 break; 4777 ASSERT(a < lpgeaddr); 4778 /* 4779 * ierr == -1 means we failed to allocate a large page. 4780 * so do a size down operation. 4781 * 4782 * ierr == -2 means some other process that privately shares 4783 * pages with this process has allocated a larger page and we 4784 * need to retry with larger pages. So do a size up 4785 * operation. This relies on the fact that large pages are 4786 * never partially shared i.e. if we share any constituent 4787 * page of a large page with another process we must share the 4788 * entire large page. Note this cannot happen for SOFTLOCK 4789 * case, unless current address (a) is at the beginning of the 4790 * next page size boundary because the other process couldn't 4791 * have relocated locked pages. 4792 */ 4793 ASSERT(ierr == -1 || ierr == -2); 4794 /* 4795 * For the very first relocation failure try to purge this 4796 * segment's cache so that the relocator can obtain an 4797 * exclusive lock on pages we want to relocate. 4798 */ 4799 if (!purged && ierr == -1 && ppa_szc != (uint_t)-1 && 4800 svd->softlockcnt != 0) { 4801 purged = 1; 4802 segvn_purge(seg); 4803 continue; 4804 } 4805 4806 if (segvn_anypgsz) { 4807 ASSERT(ierr == -2 || szc != 0); 4808 ASSERT(ierr == -1 || szc < seg->s_szc); 4809 szc = (ierr == -1) ? szc - 1 : szc + 1; 4810 } else { 4811 /* 4812 * For non COW faults and segvn_anypgsz == 0 4813 * we need to be careful not to loop forever 4814 * if existing page is found with szc other 4815 * than 0 or seg->s_szc. This could be due 4816 * to page relocations on behalf of DR or 4817 * more likely large page creation. For this 4818 * case simply re-size to existing page's szc 4819 * if returned by anon_map_getpages(). 4820 */ 4821 if (ppa_szc == (uint_t)-1) { 4822 szc = (ierr == -1) ? 
0 : seg->s_szc; 4823 } else { 4824 ASSERT(ppa_szc <= seg->s_szc); 4825 ASSERT(ierr == -2 || ppa_szc < szc); 4826 ASSERT(ierr == -1 || ppa_szc > szc); 4827 szc = ppa_szc; 4828 } 4829 } 4830 4831 pgsz = page_get_pagesize(szc); 4832 pages = btop(pgsz); 4833 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4834 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4835 if (type == F_SOFTLOCK) { 4836 /* 4837 * For softlocks we cannot reduce the fault area 4838 * (calculated based on the largest page size for this 4839 * segment) for size down and a is already next 4840 * page size aligned as assertted above for size 4841 * ups. Therefore just continue in case of softlock. 4842 */ 4843 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4844 continue; /* keep lint happy */ 4845 } else if (ierr == -2) { 4846 4847 /* 4848 * Size up case. Note lpgaddr may only be needed for 4849 * softlock case so we don't adjust it here. 4850 */ 4851 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4852 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4853 ASSERT(a >= lpgaddr); 4854 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4855 aindx = svd->anon_index + seg_page(seg, a); 4856 vpage = (svd->vpage != NULL) ? 4857 &svd->vpage[seg_page(seg, a)] : NULL; 4858 } else { 4859 /* 4860 * Size down case. Note lpgaddr may only be needed for 4861 * softlock case so we don't adjust it here. 4862 */ 4863 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4864 ASSERT(IS_P2ALIGNED(a, pgsz)); 4865 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4866 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4867 ASSERT(a < lpgeaddr); 4868 if (a < addr) { 4869 /* 4870 * The beginning of the large page region can 4871 * be pulled to the right to make a smaller 4872 * region. We haven't yet faulted a single 4873 * page. 4874 */ 4875 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4876 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4877 ASSERT(a >= lpgaddr); 4878 aindx = svd->anon_index + seg_page(seg, a); 4879 vpage = (svd->vpage != NULL) ? 4880 &svd->vpage[seg_page(seg, a)] : NULL; 4881 } 4882 } 4883 } 4884 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4885 ANON_LOCK_EXIT(&->a_rwlock); 4886 kmem_free(ppa, ppasize); 4887 return (0); 4888 error: 4889 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4890 ANON_LOCK_EXIT(&->a_rwlock); 4891 kmem_free(ppa, ppasize); 4892 if (type == F_SOFTLOCK && a > lpgaddr) { 4893 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4894 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4895 } 4896 return (err); 4897 } 4898 4899 int fltadvice = 1; /* set to free behind pages for sequential access */ 4900 4901 /* 4902 * This routine is called via a machine specific fault handling routine. 4903 * It is also called by software routines wishing to lock or unlock 4904 * a range of addresses. 
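 * (F_SOFTLOCK requests leave the translations and underlying
 * pages locked in place; a later F_SOFTUNLOCK over the same
 * range releases them through segvn_softunlock().)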
4905 * 4906 * Here is the basic algorithm: 4907 * If unlocking 4908 * Call segvn_softunlock 4909 * Return 4910 * endif 4911 * Checking and set up work 4912 * If we will need some non-anonymous pages 4913 * Call VOP_GETPAGE over the range of non-anonymous pages 4914 * endif 4915 * Loop over all addresses requested 4916 * Call segvn_faultpage passing in page list 4917 * to load up translations and handle anonymous pages 4918 * endloop 4919 * Load up translation to any additional pages in page list not 4920 * already handled that fit into this segment 4921 */ 4922 static faultcode_t 4923 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4924 enum fault_type type, enum seg_rw rw) 4925 { 4926 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4927 page_t **plp, **ppp, *pp; 4928 u_offset_t off; 4929 caddr_t a; 4930 struct vpage *vpage; 4931 uint_t vpprot, prot; 4932 int err; 4933 page_t *pl[PVN_GETPAGE_NUM + 1]; 4934 size_t plsz, pl_alloc_sz; 4935 size_t page; 4936 ulong_t anon_index; 4937 struct anon_map *amp; 4938 int dogetpage = 0; 4939 caddr_t lpgaddr, lpgeaddr; 4940 size_t pgsz; 4941 anon_sync_obj_t cookie; 4942 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4943 4944 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4945 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4946 4947 /* 4948 * First handle the easy stuff 4949 */ 4950 if (type == F_SOFTUNLOCK) { 4951 if (rw == S_READ_NOCOW) { 4952 rw = S_READ; 4953 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4954 } 4955 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4956 pgsz = (seg->s_szc == 0) ? PAGESIZE : 4957 page_get_pagesize(seg->s_szc); 4958 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4959 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4960 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4961 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4962 return (0); 4963 } 4964 4965 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4966 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4967 if (brkcow == 0) { 4968 if (svd->tr_state == SEGVN_TR_INIT) { 4969 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4970 if (svd->tr_state == SEGVN_TR_INIT) { 4971 ASSERT(svd->vp != NULL && svd->amp == NULL); 4972 ASSERT(svd->flags & MAP_TEXT); 4973 ASSERT(svd->type == MAP_PRIVATE); 4974 segvn_textrepl(seg); 4975 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4976 ASSERT(svd->tr_state != SEGVN_TR_ON || 4977 svd->amp != NULL); 4978 } 4979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4980 } 4981 } else if (svd->tr_state != SEGVN_TR_OFF) { 4982 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4983 4984 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4985 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4986 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4987 return (FC_PROT); 4988 } 4989 4990 if (svd->tr_state == SEGVN_TR_ON) { 4991 ASSERT(svd->vp != NULL && svd->amp != NULL); 4992 segvn_textunrepl(seg, 0); 4993 ASSERT(svd->amp == NULL && 4994 svd->tr_state == SEGVN_TR_OFF); 4995 } else if (svd->tr_state != SEGVN_TR_OFF) { 4996 svd->tr_state = SEGVN_TR_OFF; 4997 } 4998 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5000 } 5001 5002 top: 5003 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5004 5005 /* 5006 * If we have the same protections for the entire segment, 5007 * insure that the access being attempted is legitimate. 
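 * (The switch below maps the fault's seg_rw type to the
 * corresponding PROT_* bit and returns FC_PROT if svd->prot
 * lacks it; when per-page protections are in use they are
 * checked against the individual vpage entries further down
 * the fault path instead.)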
5008 */ 5009 5010 if (svd->pageprot == 0) { 5011 uint_t protchk; 5012 5013 switch (rw) { 5014 case S_READ: 5015 case S_READ_NOCOW: 5016 protchk = PROT_READ; 5017 break; 5018 case S_WRITE: 5019 protchk = PROT_WRITE; 5020 break; 5021 case S_EXEC: 5022 protchk = PROT_EXEC; 5023 break; 5024 case S_OTHER: 5025 default: 5026 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 5027 break; 5028 } 5029 5030 if ((svd->prot & protchk) == 0) { 5031 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5032 return (FC_PROT); /* illegal access type */ 5033 } 5034 } 5035 5036 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5037 /* this must be SOFTLOCK S_READ fault */ 5038 ASSERT(svd->amp == NULL); 5039 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5040 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5041 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5042 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5043 /* 5044 * this must be the first ever non S_READ_NOCOW 5045 * softlock for this segment. 5046 */ 5047 ASSERT(svd->softlockcnt == 0); 5048 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5049 HAT_REGION_TEXT); 5050 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5051 } 5052 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5053 goto top; 5054 } 5055 5056 /* 5057 * We can't allow the long term use of softlocks for vmpss segments, 5058 * because in some file truncation cases we should be able to demote 5059 * the segment, which requires that there are no softlocks. The 5060 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5061 * segment is S_READ_NOCOW, where the caller holds the address space 5062 * locked as writer and calls softunlock before dropping the as lock. 5063 * S_READ_NOCOW is used by /proc to read memory from another user. 5064 * 5065 * Another deadlock between SOFTLOCK and file truncation can happen 5066 * because segvn_fault_vnodepages() calls the FS one pagesize at 5067 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5068 * can cause a deadlock because the first set of page_t's remain 5069 * locked SE_SHARED. To avoid this, we demote segments on a first 5070 * SOFTLOCK if they have a length greater than the segment's 5071 * page size. 5072 * 5073 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5074 * the access type is S_READ_NOCOW and the fault length is less than 5075 * or equal to the segment's page size. While this is quite restrictive, 5076 * it should be the most common case of SOFTLOCK against a vmpss 5077 * segment. 5078 * 5079 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5080 * caller makes sure no COW will be caused by another thread for a 5081 * softlocked page. 
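 *
 * Hence the check below: a SOFTLOCK fault against a vnode
 * backed large page segment demotes the segment via
 * segvn_clrszc() unless the access is S_READ_NOCOW and the
 * rounded-up region covers no more than one large page.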
5082 */ 5083 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5084 int demote = 0; 5085 5086 if (rw != S_READ_NOCOW) { 5087 demote = 1; 5088 } 5089 if (!demote && len > PAGESIZE) { 5090 pgsz = page_get_pagesize(seg->s_szc); 5091 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5092 lpgeaddr); 5093 if (lpgeaddr - lpgaddr > pgsz) { 5094 demote = 1; 5095 } 5096 } 5097 5098 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5099 5100 if (demote) { 5101 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5102 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5103 if (seg->s_szc != 0) { 5104 segvn_vmpss_clrszc_cnt++; 5105 ASSERT(svd->softlockcnt == 0); 5106 err = segvn_clrszc(seg); 5107 if (err) { 5108 segvn_vmpss_clrszc_err++; 5109 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5110 return (FC_MAKE_ERR(err)); 5111 } 5112 } 5113 ASSERT(seg->s_szc == 0); 5114 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5115 goto top; 5116 } 5117 } 5118 5119 /* 5120 * Check to see if we need to allocate an anon_map structure. 5121 */ 5122 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5123 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5124 /* 5125 * Drop the "read" lock on the segment and acquire 5126 * the "write" version since we have to allocate the 5127 * anon_map. 5128 */ 5129 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5130 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5131 5132 if (svd->amp == NULL) { 5133 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5134 svd->amp->a_szc = seg->s_szc; 5135 } 5136 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5137 5138 /* 5139 * Start all over again since segment protections 5140 * may have changed after we dropped the "read" lock. 5141 */ 5142 goto top; 5143 } 5144 5145 /* 5146 * S_READ_NOCOW vs S_READ distinction was 5147 * only needed for the code above. After 5148 * that we treat it as S_READ. 5149 */ 5150 if (rw == S_READ_NOCOW) { 5151 ASSERT(type == F_SOFTLOCK); 5152 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5153 rw = S_READ; 5154 } 5155 5156 amp = svd->amp; 5157 5158 /* 5159 * MADV_SEQUENTIAL work is ignored for large page segments. 5160 */ 5161 if (seg->s_szc != 0) { 5162 pgsz = page_get_pagesize(seg->s_szc); 5163 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5164 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5165 if (svd->vp == NULL) { 5166 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5167 lpgeaddr, type, rw, addr, addr + len, brkcow); 5168 } else { 5169 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5170 lpgeaddr, type, rw, addr, addr + len, brkcow); 5171 if (err == IE_RETRY) { 5172 ASSERT(seg->s_szc == 0); 5173 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5174 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5175 goto top; 5176 } 5177 } 5178 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5179 return (err); 5180 } 5181 5182 page = seg_page(seg, addr); 5183 if (amp != NULL) { 5184 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5185 anon_index = svd->anon_index + page; 5186 5187 if (type == F_PROT && rw == S_READ && 5188 svd->tr_state == SEGVN_TR_OFF && 5189 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5190 size_t index = anon_index; 5191 struct anon *ap; 5192 5193 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5194 /* 5195 * The fast path could apply to S_WRITE also, except 5196 * that the protection fault could be caused by lazy 5197 * tlb flush when ro->rw. In this case, the pte is 5198 * RW already. But RO in the other cpu's tlb causes 5199 * the fault. 
Since hat_chgprot won't do anything if 5200 * pte doesn't change, we may end up faulting 5201 * indefinitely until the RO tlb entry gets replaced. 5202 */ 5203 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5204 anon_array_enter(amp, index, &cookie); 5205 ap = anon_get_ptr(amp->ahp, index); 5206 anon_array_exit(&cookie); 5207 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5208 ANON_LOCK_EXIT(&->a_rwlock); 5209 goto slow; 5210 } 5211 } 5212 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5213 ANON_LOCK_EXIT(&->a_rwlock); 5214 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5215 return (0); 5216 } 5217 } 5218 slow: 5219 5220 if (svd->vpage == NULL) 5221 vpage = NULL; 5222 else 5223 vpage = &svd->vpage[page]; 5224 5225 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5226 5227 /* 5228 * If MADV_SEQUENTIAL has been set for the particular page we 5229 * are faulting on, free behind all pages in the segment and put 5230 * them on the free list. 5231 */ 5232 5233 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5234 struct vpage *vpp; 5235 ulong_t fanon_index; 5236 size_t fpage; 5237 u_offset_t pgoff, fpgoff; 5238 struct vnode *fvp; 5239 struct anon *fap = NULL; 5240 5241 if (svd->advice == MADV_SEQUENTIAL || 5242 (svd->pageadvice && 5243 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5244 pgoff = off - PAGESIZE; 5245 fpage = page - 1; 5246 if (vpage != NULL) 5247 vpp = &svd->vpage[fpage]; 5248 if (amp != NULL) 5249 fanon_index = svd->anon_index + fpage; 5250 5251 while (pgoff > svd->offset) { 5252 if (svd->advice != MADV_SEQUENTIAL && 5253 (!svd->pageadvice || (vpage && 5254 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5255 break; 5256 5257 /* 5258 * If this is an anon page, we must find the 5259 * correct <vp, offset> for it 5260 */ 5261 fap = NULL; 5262 if (amp != NULL) { 5263 ANON_LOCK_ENTER(&->a_rwlock, 5264 RW_READER); 5265 anon_array_enter(amp, fanon_index, 5266 &cookie); 5267 fap = anon_get_ptr(amp->ahp, 5268 fanon_index); 5269 if (fap != NULL) { 5270 swap_xlate(fap, &fvp, &fpgoff); 5271 } else { 5272 fpgoff = pgoff; 5273 fvp = svd->vp; 5274 } 5275 anon_array_exit(&cookie); 5276 ANON_LOCK_EXIT(&->a_rwlock); 5277 } else { 5278 fpgoff = pgoff; 5279 fvp = svd->vp; 5280 } 5281 if (fvp == NULL) 5282 break; /* XXX */ 5283 /* 5284 * Skip pages that are free or have an 5285 * "exclusive" lock. 5286 */ 5287 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5288 if (pp == NULL) 5289 break; 5290 /* 5291 * We don't need the page_struct_lock to test 5292 * as this is only advisory; even if we 5293 * acquire it someone might race in and lock 5294 * the page after we unlock and before the 5295 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5296 */ 5297 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5298 /* 5299 * Hold the vnode before releasing 5300 * the page lock to prevent it from 5301 * being freed and re-used by some 5302 * other thread. 5303 */ 5304 VN_HOLD(fvp); 5305 page_unlock(pp); 5306 /* 5307 * We should build a page list 5308 * to kluster putpages XXX 5309 */ 5310 (void) VOP_PUTPAGE(fvp, 5311 (offset_t)fpgoff, PAGESIZE, 5312 (B_DONTNEED|B_FREE|B_ASYNC), 5313 svd->cred); 5314 VN_RELE(fvp); 5315 } else { 5316 /* 5317 * XXX - Should the loop terminate if 5318 * the page is `locked'? 5319 */ 5320 page_unlock(pp); 5321 } 5322 --vpp; 5323 --fanon_index; 5324 pgoff -= PAGESIZE; 5325 } 5326 } 5327 } 5328 5329 plp = pl; 5330 *plp = NULL; 5331 pl_alloc_sz = 0; 5332 5333 /* 5334 * See if we need to call VOP_GETPAGE for 5335 * *any* of the range being faulted on. 
5336 * We can skip all of this work if there 5337 * was no original vnode. 5338 */ 5339 if (svd->vp != NULL) { 5340 u_offset_t vp_off; 5341 size_t vp_len; 5342 struct anon *ap; 5343 vnode_t *vp; 5344 5345 vp_off = off; 5346 vp_len = len; 5347 5348 if (amp == NULL) 5349 dogetpage = 1; 5350 else { 5351 /* 5352 * Only acquire reader lock to prevent amp->ahp 5353 * from being changed. It's ok to miss pages, 5354 * hence we don't do anon_array_enter 5355 */ 5356 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5357 ap = anon_get_ptr(amp->ahp, anon_index); 5358 5359 if (len <= PAGESIZE) 5360 /* inline non_anon() */ 5361 dogetpage = (ap == NULL); 5362 else 5363 dogetpage = non_anon(amp->ahp, anon_index, 5364 &vp_off, &vp_len); 5365 ANON_LOCK_EXIT(&->a_rwlock); 5366 } 5367 5368 if (dogetpage) { 5369 enum seg_rw arw; 5370 struct as *as = seg->s_as; 5371 5372 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5373 /* 5374 * Page list won't fit in local array, 5375 * allocate one of the needed size. 5376 */ 5377 pl_alloc_sz = 5378 (btop(len) + 1) * sizeof (page_t *); 5379 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5380 plp[0] = NULL; 5381 plsz = len; 5382 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5383 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5384 (((size_t)(addr + PAGESIZE) < 5385 (size_t)(seg->s_base + seg->s_size)) && 5386 hat_probe(as->a_hat, addr + PAGESIZE))) { 5387 /* 5388 * Ask VOP_GETPAGE to return the exact number 5389 * of pages if 5390 * (a) this is a COW fault, or 5391 * (b) this is a software fault, or 5392 * (c) next page is already mapped. 5393 */ 5394 plsz = len; 5395 } else { 5396 /* 5397 * Ask VOP_GETPAGE to return adjacent pages 5398 * within the segment. 5399 */ 5400 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5401 ((seg->s_base + seg->s_size) - addr)); 5402 ASSERT((addr + plsz) <= 5403 (seg->s_base + seg->s_size)); 5404 } 5405 5406 /* 5407 * Need to get some non-anonymous pages. 5408 * We need to make only one call to GETPAGE to do 5409 * this to prevent certain deadlocking conditions 5410 * when we are doing locking. In this case 5411 * non_anon() should have picked up the smallest 5412 * range which includes all the non-anonymous 5413 * pages in the requested range. We have to 5414 * be careful regarding which rw flag to pass in 5415 * because on a private mapping, the underlying 5416 * object is never allowed to be written. 5417 */ 5418 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5419 arw = S_READ; 5420 } else { 5421 arw = rw; 5422 } 5423 vp = svd->vp; 5424 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5425 "segvn_getpage:seg %p addr %p vp %p", 5426 seg, addr, vp); 5427 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5428 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5429 svd->cred); 5430 if (err) { 5431 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5432 segvn_pagelist_rele(plp); 5433 if (pl_alloc_sz) 5434 kmem_free(plp, pl_alloc_sz); 5435 return (FC_MAKE_ERR(err)); 5436 } 5437 if (svd->type == MAP_PRIVATE) 5438 vpprot &= ~PROT_WRITE; 5439 } 5440 } 5441 5442 /* 5443 * N.B. at this time the plp array has all the needed non-anon 5444 * pages in addition to (possibly) having some adjacent pages. 5445 */ 5446 5447 /* 5448 * Always acquire the anon_array_lock to prevent 5449 * 2 threads from allocating separate anon slots for 5450 * the same "addr". 
5451 * 5452 * If this is a copy-on-write fault and we don't already 5453 * have the anon_array_lock, acquire it to prevent the 5454 * fault routine from handling multiple copy-on-write faults 5455 * on the same "addr" in the same address space. 5456 * 5457 * Only one thread should deal with the fault since after 5458 * it is handled, the other threads can acquire a translation 5459 * to the newly created private page. This prevents two or 5460 * more threads from creating different private pages for the 5461 * same fault. 5462 * 5463 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5464 * to prevent deadlock between this thread and another thread 5465 * which has soft-locked this page and wants to acquire serial_lock. 5466 * ( bug 4026339 ) 5467 * 5468 * The fix for bug 4026339 becomes unnecessary when using the 5469 * locking scheme with per amp rwlock and a global set of hash 5470 * lock, anon_array_lock. If we steal a vnode page when low 5471 * on memory and upgrad the page lock through page_rename, 5472 * then the page is PAGE_HANDLED, nothing needs to be done 5473 * for this page after returning from segvn_faultpage. 5474 * 5475 * But really, the page lock should be downgraded after 5476 * the stolen page is page_rename'd. 5477 */ 5478 5479 if (amp != NULL) 5480 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5481 5482 /* 5483 * Ok, now loop over the address range and handle faults 5484 */ 5485 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5486 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5487 type, rw, brkcow, a == addr); 5488 if (err) { 5489 if (amp != NULL) 5490 ANON_LOCK_EXIT(&->a_rwlock); 5491 if (type == F_SOFTLOCK && a > addr) { 5492 segvn_softunlock(seg, addr, (a - addr), 5493 S_OTHER); 5494 } 5495 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5496 segvn_pagelist_rele(plp); 5497 if (pl_alloc_sz) 5498 kmem_free(plp, pl_alloc_sz); 5499 return (err); 5500 } 5501 if (vpage) { 5502 vpage++; 5503 } else if (svd->vpage) { 5504 page = seg_page(seg, addr); 5505 vpage = &svd->vpage[++page]; 5506 } 5507 } 5508 5509 /* Didn't get pages from the underlying fs so we're done */ 5510 if (!dogetpage) 5511 goto done; 5512 5513 /* 5514 * Now handle any other pages in the list returned. 5515 * If the page can be used, load up the translations now. 5516 * Note that the for loop will only be entered if "plp" 5517 * is pointing to a non-NULL page pointer which means that 5518 * VOP_GETPAGE() was called and vpprot has been initialized. 5519 */ 5520 if (svd->pageprot == 0) 5521 prot = svd->prot & vpprot; 5522 5523 5524 /* 5525 * Large Files: diff should be unsigned value because we started 5526 * supporting > 2GB segment sizes from 2.5.1 and when a 5527 * large file of size > 2GB gets mapped to address space 5528 * the diff value can be > 2GB. 5529 */ 5530 5531 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5532 size_t diff; 5533 struct anon *ap; 5534 int anon_index; 5535 anon_sync_obj_t cookie; 5536 int hat_flag = HAT_LOAD_ADV; 5537 5538 if (svd->flags & MAP_TEXT) { 5539 hat_flag |= HAT_LOAD_TEXT; 5540 } 5541 5542 if (pp == PAGE_HANDLED) 5543 continue; 5544 5545 if (svd->tr_state != SEGVN_TR_ON && 5546 pp->p_offset >= svd->offset && 5547 pp->p_offset < svd->offset + seg->s_size) { 5548 5549 diff = pp->p_offset - svd->offset; 5550 5551 /* 5552 * Large Files: Following is the assertion 5553 * validating the above cast. 
5554 */ 5555 ASSERT(svd->vp == pp->p_vnode); 5556 5557 page = btop(diff); 5558 if (svd->pageprot) 5559 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5560 5561 /* 5562 * Prevent other threads in the address space from 5563 * creating private pages (i.e., allocating anon slots) 5564 * while we are in the process of loading translations 5565 * to additional pages returned by the underlying 5566 * object. 5567 */ 5568 if (amp != NULL) { 5569 anon_index = svd->anon_index + page; 5570 anon_array_enter(amp, anon_index, &cookie); 5571 ap = anon_get_ptr(amp->ahp, anon_index); 5572 } 5573 if ((amp == NULL) || (ap == NULL)) { 5574 if (IS_VMODSORT(pp->p_vnode) || 5575 enable_mbit_wa) { 5576 if (rw == S_WRITE) 5577 hat_setmod(pp); 5578 else if (rw != S_OTHER && 5579 !hat_ismod(pp)) 5580 prot &= ~PROT_WRITE; 5581 } 5582 /* 5583 * Skip mapping read ahead pages marked 5584 * for migration, so they will get migrated 5585 * properly on fault 5586 */ 5587 ASSERT(amp == NULL || 5588 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5589 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5590 hat_memload_region(hat, 5591 seg->s_base + diff, 5592 pp, prot, hat_flag, 5593 svd->rcookie); 5594 } 5595 } 5596 if (amp != NULL) 5597 anon_array_exit(&cookie); 5598 } 5599 page_unlock(pp); 5600 } 5601 done: 5602 if (amp != NULL) 5603 ANON_LOCK_EXIT(&->a_rwlock); 5604 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5605 if (pl_alloc_sz) 5606 kmem_free(plp, pl_alloc_sz); 5607 return (0); 5608 } 5609 5610 /* 5611 * This routine is used to start I/O on pages asynchronously. XXX it will 5612 * only create PAGESIZE pages. At fault time they will be relocated into 5613 * larger pages. 5614 */ 5615 static faultcode_t 5616 segvn_faulta(struct seg *seg, caddr_t addr) 5617 { 5618 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5619 int err; 5620 struct anon_map *amp; 5621 vnode_t *vp; 5622 5623 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5624 5625 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5626 if ((amp = svd->amp) != NULL) { 5627 struct anon *ap; 5628 5629 /* 5630 * Reader lock to prevent amp->ahp from being changed. 5631 * This is advisory, it's ok to miss a page, so 5632 * we don't do anon_array_enter lock. 
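 * If an anon slot already exists for this address the read is
 * started through anon_getpage(); otherwise we fall through to
 * VOP_GETPAGE() on the backing vnode, or simply return for a
 * ZFOD page with no vnode.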
5633 */ 5634 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5635 if ((ap = anon_get_ptr(amp->ahp, 5636 svd->anon_index + seg_page(seg, addr))) != NULL) { 5637 5638 err = anon_getpage(&ap, NULL, NULL, 5639 0, seg, addr, S_READ, svd->cred); 5640 5641 ANON_LOCK_EXIT(&->a_rwlock); 5642 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5643 if (err) 5644 return (FC_MAKE_ERR(err)); 5645 return (0); 5646 } 5647 ANON_LOCK_EXIT(&->a_rwlock); 5648 } 5649 5650 if (svd->vp == NULL) { 5651 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5652 return (0); /* zfod page - do nothing now */ 5653 } 5654 5655 vp = svd->vp; 5656 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5657 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5658 err = VOP_GETPAGE(vp, 5659 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5660 PAGESIZE, NULL, NULL, 0, seg, addr, 5661 S_OTHER, svd->cred); 5662 5663 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5664 if (err) 5665 return (FC_MAKE_ERR(err)); 5666 return (0); 5667 } 5668 5669 static int 5670 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5671 { 5672 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5673 struct vpage *svp, *evp; 5674 struct vnode *vp; 5675 size_t pgsz; 5676 pgcnt_t pgcnt; 5677 anon_sync_obj_t cookie; 5678 int unload_done = 0; 5679 5680 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5681 5682 if ((svd->maxprot & prot) != prot) 5683 return (EACCES); /* violated maxprot */ 5684 5685 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5686 5687 /* return if prot is the same */ 5688 if (!svd->pageprot && svd->prot == prot) { 5689 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5690 return (0); 5691 } 5692 5693 /* 5694 * Since we change protections we first have to flush the cache. 5695 * This makes sure all the pagelock calls have to recheck 5696 * protections. 5697 */ 5698 if (svd->softlockcnt > 0) { 5699 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5700 /* 5701 * Since we do have the segvn writers lock nobody can fill 5702 * the cache with entries belonging to this seg during 5703 * the purge. The flush either succeeds or we still have 5704 * pending I/Os. 5705 */ 5706 segvn_purge(seg); 5707 if (svd->softlockcnt > 0) { 5708 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5709 return (EAGAIN); 5710 } 5711 } 5712 5713 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5714 ASSERT(svd->amp == NULL); 5715 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5716 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5717 HAT_REGION_TEXT); 5718 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5719 unload_done = 1; 5720 } else if (svd->tr_state == SEGVN_TR_INIT) { 5721 svd->tr_state = SEGVN_TR_OFF; 5722 } else if (svd->tr_state == SEGVN_TR_ON) { 5723 ASSERT(svd->amp != NULL); 5724 segvn_textunrepl(seg, 0); 5725 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5726 unload_done = 1; 5727 } 5728 5729 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5730 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5731 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5732 segvn_inval_trcache(svd->vp); 5733 } 5734 if (seg->s_szc != 0) { 5735 int err; 5736 pgsz = page_get_pagesize(seg->s_szc); 5737 pgcnt = pgsz >> PAGESHIFT; 5738 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5739 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5740 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5741 ASSERT(seg->s_base != addr || seg->s_size != len); 5742 /* 5743 * If we are holding the as lock as a reader then 5744 * we need to return IE_RETRY and let the as 5745 * layer drop and re-aquire the lock as a writer. 
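 * The caller is expected to retry the operation with the as
 * lock held as writer, at which point the segvn_demote_range()
 * calls below can proceed safely.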
5746 */ 5747 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5748 return (IE_RETRY); 5749 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5750 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5751 err = segvn_demote_range(seg, addr, len, 5752 SDR_END, 0); 5753 } else { 5754 uint_t szcvec = map_pgszcvec(seg->s_base, 5755 pgsz, (uintptr_t)seg->s_base, 5756 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5757 err = segvn_demote_range(seg, addr, len, 5758 SDR_END, szcvec); 5759 } 5760 if (err == 0) 5761 return (IE_RETRY); 5762 if (err == ENOMEM) 5763 return (IE_NOMEM); 5764 return (err); 5765 } 5766 } 5767 5768 5769 /* 5770 * If it's a private mapping and we're making it writable 5771 * and no swap space has been reserved, have to reserve 5772 * it all now. If it's a private mapping to a file (i.e., vp != NULL) 5773 * and we're removing write permission on the entire segment and 5774 * we haven't modified any pages, we can release the swap space. 5775 */ 5776 if (svd->type == MAP_PRIVATE) { 5777 if (prot & PROT_WRITE) { 5778 size_t sz; 5779 if (svd->swresv == 0 && !(svd->flags & MAP_NORESERVE)) { 5780 if (anon_resv_zone(seg->s_size, 5781 seg->s_as->a_proc->p_zone) == 0) { 5782 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5783 return (IE_NOMEM); 5784 } 5785 sz = svd->swresv = seg->s_size; 5786 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5787 "anon proc:%p %lu %u", 5788 seg, sz, 1); 5789 } 5790 } else { 5791 /* 5792 * Swap space is released only if this segment 5793 * does not map anonymous memory, since read faults 5794 * on such segments still need an anon slot to read 5795 * in the data. 5796 */ 5797 if (svd->swresv != 0 && svd->vp != NULL && 5798 svd->amp == NULL && addr == seg->s_base && 5799 len == seg->s_size && svd->pageprot == 0) { 5800 anon_unresv_zone(svd->swresv, 5801 seg->s_as->a_proc->p_zone); 5802 svd->swresv = 0; 5803 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5804 "anon proc:%p %lu %u", 5805 seg, 0, 0); 5806 } 5807 } 5808 } 5809 5810 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5811 if (svd->prot == prot) { 5812 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5813 return (0); /* all done */ 5814 } 5815 svd->prot = (uchar_t)prot; 5816 } else if (svd->type == MAP_PRIVATE) { 5817 struct anon *ap = NULL; 5818 page_t *pp; 5819 u_offset_t offset, off; 5820 struct anon_map *amp; 5821 ulong_t anon_idx = 0; 5822 5823 /* 5824 * A vpage structure exists or else the change does not 5825 * involve the entire segment. Establish a vpage structure 5826 * if none is there. Then, for each page in the range, 5827 * adjust its individual permissions. Note that write- 5828 * enabling a MAP_PRIVATE page can affect the claims for 5829 * locked down memory. Overcommitting memory terminates 5830 * the operation. 5831 */ 5832 segvn_vpage(seg); 5833 svd->pageprot = 1; 5834 if ((amp = svd->amp) != NULL) { 5835 anon_idx = svd->anon_index + seg_page(seg, addr); 5836 ASSERT(seg->s_szc == 0 || 5837 IS_P2ALIGNED(anon_idx, pgcnt)); 5838 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5839 } 5840 5841 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5842 evp = &svd->vpage[seg_page(seg, addr + len)]; 5843 5844 /* 5845 * See Statement at the beginning of segvn_lockop regarding 5846 * the way cowcnts and lckcnts are handled. 
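 * In short: when a protection change affects a locked page
 * (VPP_ISPPLOCK), enabling write must succeed in
 * page_addclaim() and disabling write in page_subclaim();
 * if either claim fails we stop early, and the code after the
 * loop unloads the translations that were already updated.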
5847 */ 5848 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5849 5850 if (seg->s_szc != 0) { 5851 if (amp != NULL) { 5852 anon_array_enter(amp, anon_idx, 5853 &cookie); 5854 } 5855 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5856 !segvn_claim_pages(seg, svp, offset, 5857 anon_idx, prot)) { 5858 if (amp != NULL) { 5859 anon_array_exit(&cookie); 5860 } 5861 break; 5862 } 5863 if (amp != NULL) { 5864 anon_array_exit(&cookie); 5865 } 5866 anon_idx++; 5867 } else { 5868 if (amp != NULL) { 5869 anon_array_enter(amp, anon_idx, 5870 &cookie); 5871 ap = anon_get_ptr(amp->ahp, anon_idx++); 5872 } 5873 5874 if (VPP_ISPPLOCK(svp) && 5875 VPP_PROT(svp) != prot) { 5876 5877 if (amp == NULL || ap == NULL) { 5878 vp = svd->vp; 5879 off = offset; 5880 } else 5881 swap_xlate(ap, &vp, &off); 5882 if (amp != NULL) 5883 anon_array_exit(&cookie); 5884 5885 if ((pp = page_lookup(vp, off, 5886 SE_SHARED)) == NULL) { 5887 panic("segvn_setprot: no page"); 5888 /*NOTREACHED*/ 5889 } 5890 ASSERT(seg->s_szc == 0); 5891 if ((VPP_PROT(svp) ^ prot) & 5892 PROT_WRITE) { 5893 if (prot & PROT_WRITE) { 5894 if (!page_addclaim(pp)) { 5895 page_unlock(pp); 5896 break; 5897 } 5898 } else { 5899 if (!page_subclaim(pp)) { 5900 page_unlock(pp); 5901 break; 5902 } 5903 } 5904 } 5905 page_unlock(pp); 5906 } else if (amp != NULL) 5907 anon_array_exit(&cookie); 5908 } 5909 VPP_SETPROT(svp, prot); 5910 offset += PAGESIZE; 5911 } 5912 if (amp != NULL) 5913 ANON_LOCK_EXIT(&->a_rwlock); 5914 5915 /* 5916 * Did we terminate prematurely? If so, simply unload 5917 * the translations to the things we've updated so far. 5918 */ 5919 if (svp != evp) { 5920 if (unload_done) { 5921 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5922 return (IE_NOMEM); 5923 } 5924 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5925 PAGESIZE; 5926 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5927 if (len != 0) 5928 hat_unload(seg->s_as->a_hat, addr, 5929 len, HAT_UNLOAD); 5930 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5931 return (IE_NOMEM); 5932 } 5933 } else { 5934 segvn_vpage(seg); 5935 svd->pageprot = 1; 5936 evp = &svd->vpage[seg_page(seg, addr + len)]; 5937 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5938 VPP_SETPROT(svp, prot); 5939 } 5940 } 5941 5942 if (unload_done) { 5943 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5944 return (0); 5945 } 5946 5947 if (((prot & PROT_WRITE) != 0 && 5948 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 5949 (prot & ~PROT_USER) == PROT_NONE) { 5950 /* 5951 * Either private or shared data with write access (in 5952 * which case we need to throw out all former translations 5953 * so that we get the right translations set up on fault 5954 * and we don't allow write access to any copy-on-write pages 5955 * that might be around or to prevent write access to pages 5956 * representing holes in a file), or we don't have permission 5957 * to access the memory at all (in which case we have to 5958 * unload any current translations that might exist). 5959 */ 5960 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 5961 } else { 5962 /* 5963 * A shared mapping or a private mapping in which write 5964 * protection is going to be denied - just change all the 5965 * protections over the range of addresses in question. 5966 * segvn does not support any other attributes other 5967 * than prot so we can use hat_chgattr. 
5968 */ 5969 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 5970 } 5971 5972 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5973 5974 return (0); 5975 } 5976 5977 /* 5978 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 5979 * to determine if the seg is capable of mapping the requested szc. 5980 */ 5981 static int 5982 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 5983 { 5984 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5985 struct segvn_data *nsvd; 5986 struct anon_map *amp = svd->amp; 5987 struct seg *nseg; 5988 caddr_t eaddr = addr + len, a; 5989 size_t pgsz = page_get_pagesize(szc); 5990 pgcnt_t pgcnt = page_get_pagecnt(szc); 5991 int err; 5992 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 5993 extern struct vnode kvp; 5994 5995 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5996 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 5997 5998 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 5999 return (0); 6000 } 6001 6002 /* 6003 * addr should always be pgsz aligned but eaddr may be misaligned if 6004 * it's at the end of the segment. 6005 * 6006 * XXX we should assert this condition since as_setpagesize() logic 6007 * guarantees it. 6008 */ 6009 if (!IS_P2ALIGNED(addr, pgsz) || 6010 (!IS_P2ALIGNED(eaddr, pgsz) && 6011 eaddr != seg->s_base + seg->s_size)) { 6012 6013 segvn_setpgsz_align_err++; 6014 return (EINVAL); 6015 } 6016 6017 if (amp != NULL && svd->type == MAP_SHARED) { 6018 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6019 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6020 6021 segvn_setpgsz_anon_align_err++; 6022 return (EINVAL); 6023 } 6024 } 6025 6026 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6027 szc > segvn_maxpgszc) { 6028 return (EINVAL); 6029 } 6030 6031 /* paranoid check */ 6032 if (svd->vp != NULL && 6033 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6034 return (EINVAL); 6035 } 6036 6037 if (seg->s_szc == 0 && svd->vp != NULL && 6038 map_addr_vacalign_check(addr, off)) { 6039 return (EINVAL); 6040 } 6041 6042 /* 6043 * Check that protections are the same within new page 6044 * size boundaries. 6045 */ 6046 if (svd->pageprot) { 6047 for (a = addr; a < eaddr; a += pgsz) { 6048 if ((a + pgsz) > eaddr) { 6049 if (!sameprot(seg, a, eaddr - a)) { 6050 return (EINVAL); 6051 } 6052 } else { 6053 if (!sameprot(seg, a, pgsz)) { 6054 return (EINVAL); 6055 } 6056 } 6057 } 6058 } 6059 6060 /* 6061 * Since we are changing page size we first have to flush 6062 * the cache. This makes sure all the pagelock calls have 6063 * to recheck protections. 6064 */ 6065 if (svd->softlockcnt > 0) { 6066 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6067 /* 6068 * Since we do have the segvn writers lock nobody can fill 6069 * the cache with entries belonging to this seg during 6070 * the purge. The flush either succeeds or we still have 6071 * pending I/Os. 
6072 */ 6073 segvn_purge(seg); 6074 if (svd->softlockcnt > 0) { 6075 return (EAGAIN); 6076 } 6077 } 6078 6079 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6080 ASSERT(svd->amp == NULL); 6081 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6082 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6083 HAT_REGION_TEXT); 6084 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6085 } else if (svd->tr_state == SEGVN_TR_INIT) { 6086 svd->tr_state = SEGVN_TR_OFF; 6087 } else if (svd->tr_state == SEGVN_TR_ON) { 6088 ASSERT(svd->amp != NULL); 6089 segvn_textunrepl(seg, 1); 6090 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6091 amp = NULL; 6092 } 6093 6094 /* 6095 * Operation for sub range of existing segment. 6096 */ 6097 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6098 if (szc < seg->s_szc) { 6099 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6100 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6101 if (err == 0) { 6102 return (IE_RETRY); 6103 } 6104 if (err == ENOMEM) { 6105 return (IE_NOMEM); 6106 } 6107 return (err); 6108 } 6109 if (addr != seg->s_base) { 6110 nseg = segvn_split_seg(seg, addr); 6111 if (eaddr != (nseg->s_base + nseg->s_size)) { 6112 /* eaddr is szc aligned */ 6113 (void) segvn_split_seg(nseg, eaddr); 6114 } 6115 return (IE_RETRY); 6116 } 6117 if (eaddr != (seg->s_base + seg->s_size)) { 6118 /* eaddr is szc aligned */ 6119 (void) segvn_split_seg(seg, eaddr); 6120 } 6121 return (IE_RETRY); 6122 } 6123 6124 /* 6125 * Break any low level sharing and reset seg->s_szc to 0. 6126 */ 6127 if ((err = segvn_clrszc(seg)) != 0) { 6128 if (err == ENOMEM) { 6129 err = IE_NOMEM; 6130 } 6131 return (err); 6132 } 6133 ASSERT(seg->s_szc == 0); 6134 6135 /* 6136 * If the end of the current segment is not pgsz aligned 6137 * then attempt to concatenate with the next segment. 6138 */ 6139 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6140 nseg = AS_SEGNEXT(seg->s_as, seg); 6141 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6142 return (ENOMEM); 6143 } 6144 if (nseg->s_ops != &segvn_ops) { 6145 return (EINVAL); 6146 } 6147 nsvd = (struct segvn_data *)nseg->s_data; 6148 if (nsvd->softlockcnt > 0) { 6149 segvn_purge(nseg); 6150 if (nsvd->softlockcnt > 0) { 6151 return (EAGAIN); 6152 } 6153 } 6154 err = segvn_clrszc(nseg); 6155 if (err == ENOMEM) { 6156 err = IE_NOMEM; 6157 } 6158 if (err != 0) { 6159 return (err); 6160 } 6161 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6162 err = segvn_concat(seg, nseg, 1); 6163 if (err == -1) { 6164 return (EINVAL); 6165 } 6166 if (err == -2) { 6167 return (IE_NOMEM); 6168 } 6169 return (IE_RETRY); 6170 } 6171 6172 /* 6173 * May need to re-align anon array to 6174 * new szc. 
6175 */ 6176 if (amp != NULL) { 6177 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6178 struct anon_hdr *nahp; 6179 6180 ASSERT(svd->type == MAP_PRIVATE); 6181 6182 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6183 ASSERT(amp->refcnt == 1); 6184 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6185 if (nahp == NULL) { 6186 ANON_LOCK_EXIT(&->a_rwlock); 6187 return (IE_NOMEM); 6188 } 6189 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6190 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6191 anon_release(nahp, btop(amp->size)); 6192 ANON_LOCK_EXIT(&->a_rwlock); 6193 return (IE_NOMEM); 6194 } 6195 anon_release(amp->ahp, btop(amp->size)); 6196 amp->ahp = nahp; 6197 svd->anon_index = 0; 6198 ANON_LOCK_EXIT(&->a_rwlock); 6199 } 6200 } 6201 if (svd->vp != NULL && szc != 0) { 6202 struct vattr va; 6203 u_offset_t eoffpage = svd->offset; 6204 va.va_mask = AT_SIZE; 6205 eoffpage += seg->s_size; 6206 eoffpage = btopr(eoffpage); 6207 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred) != 0) { 6208 segvn_setpgsz_getattr_err++; 6209 return (EINVAL); 6210 } 6211 if (btopr(va.va_size) < eoffpage) { 6212 segvn_setpgsz_eof_err++; 6213 return (EINVAL); 6214 } 6215 if (amp != NULL) { 6216 /* 6217 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6218 * don't take anon map lock here to avoid holding it 6219 * across VOP_GETPAGE() calls that may call back into 6220 * segvn for klsutering checks. We don't really need 6221 * anon map lock here since it's a private segment and 6222 * we hold as level lock as writers. 6223 */ 6224 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6225 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6226 seg->s_size, szc, svd->prot, svd->vpage, 6227 svd->cred)) != 0) { 6228 return (EINVAL); 6229 } 6230 } 6231 segvn_setvnode_mpss(svd->vp); 6232 } 6233 6234 if (amp != NULL) { 6235 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6236 if (svd->type == MAP_PRIVATE) { 6237 amp->a_szc = szc; 6238 } else if (szc > amp->a_szc) { 6239 amp->a_szc = szc; 6240 } 6241 ANON_LOCK_EXIT(&->a_rwlock); 6242 } 6243 6244 seg->s_szc = szc; 6245 6246 return (0); 6247 } 6248 6249 static int 6250 segvn_clrszc(struct seg *seg) 6251 { 6252 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6253 struct anon_map *amp = svd->amp; 6254 size_t pgsz; 6255 pgcnt_t pages; 6256 int err = 0; 6257 caddr_t a = seg->s_base; 6258 caddr_t ea = a + seg->s_size; 6259 ulong_t an_idx = svd->anon_index; 6260 vnode_t *vp = svd->vp; 6261 struct vpage *vpage = svd->vpage; 6262 page_t *anon_pl[1 + 1], *pp; 6263 struct anon *ap, *oldap; 6264 uint_t prot = svd->prot, vpprot; 6265 int pageflag = 0; 6266 6267 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6268 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6269 ASSERT(svd->softlockcnt == 0); 6270 6271 if (vp == NULL && amp == NULL) { 6272 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6273 seg->s_szc = 0; 6274 return (0); 6275 } 6276 6277 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6278 ASSERT(svd->amp == NULL); 6279 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6280 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6281 HAT_REGION_TEXT); 6282 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6283 } else if (svd->tr_state == SEGVN_TR_ON) { 6284 ASSERT(svd->amp != NULL); 6285 segvn_textunrepl(seg, 1); 6286 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6287 amp = NULL; 6288 } else { 6289 if (svd->tr_state != SEGVN_TR_OFF) { 6290 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6291 svd->tr_state = SEGVN_TR_OFF; 6292 } 6293 6294 /* 6295 * do HAT_UNLOAD_UNMAP since we are changing the pagesize. 
6296 * unload argument is 0 when we are freeing the segment 6297 * and unload was already done. 6298 */ 6299 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6300 HAT_UNLOAD_UNMAP); 6301 } 6302 6303 if (amp == NULL || svd->type == MAP_SHARED) { 6304 seg->s_szc = 0; 6305 return (0); 6306 } 6307 6308 pgsz = page_get_pagesize(seg->s_szc); 6309 pages = btop(pgsz); 6310 6311 /* 6312 * XXX anon rwlock is not really needed because this is a 6313 * private segment and we are writers. 6314 */ 6315 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6316 6317 for (; a < ea; a += pgsz, an_idx += pages) { 6318 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6319 ASSERT(vpage != NULL || svd->pageprot == 0); 6320 if (vpage != NULL) { 6321 ASSERT(sameprot(seg, a, pgsz)); 6322 prot = VPP_PROT(vpage); 6323 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6324 } 6325 if (seg->s_szc != 0) { 6326 ASSERT(vp == NULL || anon_pages(amp->ahp, 6327 an_idx, pages) == pages); 6328 if ((err = anon_map_demotepages(amp, an_idx, 6329 seg, a, prot, vpage, svd->cred)) != 0) { 6330 goto out; 6331 } 6332 } else { 6333 if (oldap->an_refcnt == 1) { 6334 continue; 6335 } 6336 if ((err = anon_getpage(&oldap, &vpprot, 6337 anon_pl, PAGESIZE, seg, a, S_READ, 6338 svd->cred))) { 6339 goto out; 6340 } 6341 if ((pp = anon_private(&ap, seg, a, prot, 6342 anon_pl[0], pageflag, svd->cred)) == NULL) { 6343 err = ENOMEM; 6344 goto out; 6345 } 6346 anon_decref(oldap); 6347 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6348 ANON_SLEEP); 6349 page_unlock(pp); 6350 } 6351 } 6352 vpage = (vpage == NULL) ? NULL : vpage + pages; 6353 } 6354 6355 amp->a_szc = 0; 6356 seg->s_szc = 0; 6357 out: 6358 ANON_LOCK_EXIT(&->a_rwlock); 6359 return (err); 6360 } 6361 6362 static int 6363 segvn_claim_pages( 6364 struct seg *seg, 6365 struct vpage *svp, 6366 u_offset_t off, 6367 ulong_t anon_idx, 6368 uint_t prot) 6369 { 6370 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6371 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6372 page_t **ppa; 6373 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6374 struct anon_map *amp = svd->amp; 6375 struct vpage *evp = svp + pgcnt; 6376 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6377 + seg->s_base; 6378 struct anon *ap; 6379 struct vnode *vp = svd->vp; 6380 page_t *pp; 6381 pgcnt_t pg_idx, i; 6382 int err = 0; 6383 anoff_t aoff; 6384 int anon = (amp != NULL) ? 
1 : 0; 6385 6386 ASSERT(svd->type == MAP_PRIVATE); 6387 ASSERT(svd->vpage != NULL); 6388 ASSERT(seg->s_szc != 0); 6389 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6390 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6391 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6392 6393 if (VPP_PROT(svp) == prot) 6394 return (1); 6395 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6396 return (1); 6397 6398 ppa = kmem_alloc(ppasize, KM_SLEEP); 6399 if (anon && vp != NULL) { 6400 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6401 anon = 0; 6402 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6403 } 6404 ASSERT(!anon || 6405 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6406 } 6407 6408 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6409 if (!VPP_ISPPLOCK(svp)) 6410 continue; 6411 if (anon) { 6412 ap = anon_get_ptr(amp->ahp, anon_idx); 6413 if (ap == NULL) { 6414 panic("segvn_claim_pages: no anon slot"); 6415 } 6416 swap_xlate(ap, &vp, &aoff); 6417 off = (u_offset_t)aoff; 6418 } 6419 ASSERT(vp != NULL); 6420 if ((pp = page_lookup(vp, 6421 (u_offset_t)off, SE_SHARED)) == NULL) { 6422 panic("segvn_claim_pages: no page"); 6423 } 6424 ppa[pg_idx++] = pp; 6425 off += PAGESIZE; 6426 } 6427 6428 if (ppa[0] == NULL) { 6429 kmem_free(ppa, ppasize); 6430 return (1); 6431 } 6432 6433 ASSERT(pg_idx <= pgcnt); 6434 ppa[pg_idx] = NULL; 6435 6436 if (prot & PROT_WRITE) 6437 err = page_addclaim_pages(ppa); 6438 else 6439 err = page_subclaim_pages(ppa); 6440 6441 for (i = 0; i < pg_idx; i++) { 6442 ASSERT(ppa[i] != NULL); 6443 page_unlock(ppa[i]); 6444 } 6445 6446 kmem_free(ppa, ppasize); 6447 return (err); 6448 } 6449 6450 /* 6451 * Returns right (upper address) segment if split occured. 6452 * If the address is equal to the beginning or end of its segment it returns 6453 * the current segment. 6454 */ 6455 static struct seg * 6456 segvn_split_seg(struct seg *seg, caddr_t addr) 6457 { 6458 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6459 struct seg *nseg; 6460 size_t nsize; 6461 struct segvn_data *nsvd; 6462 6463 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6464 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6465 6466 ASSERT(addr >= seg->s_base); 6467 ASSERT(addr <= seg->s_base + seg->s_size); 6468 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6469 6470 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6471 return (seg); 6472 6473 nsize = seg->s_base + seg->s_size - addr; 6474 seg->s_size = addr - seg->s_base; 6475 nseg = seg_alloc(seg->s_as, addr, nsize); 6476 ASSERT(nseg != NULL); 6477 nseg->s_ops = seg->s_ops; 6478 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6479 nseg->s_data = (void *)nsvd; 6480 nseg->s_szc = seg->s_szc; 6481 *nsvd = *svd; 6482 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6483 nsvd->seg = nseg; 6484 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6485 6486 if (nsvd->vp != NULL) { 6487 VN_HOLD(nsvd->vp); 6488 nsvd->offset = svd->offset + 6489 (uintptr_t)(nseg->s_base - seg->s_base); 6490 if (nsvd->type == MAP_SHARED) 6491 lgrp_shm_policy_init(NULL, nsvd->vp); 6492 } else { 6493 /* 6494 * The offset for an anonymous segment has no signifigance in 6495 * terms of an offset into a file. 
If we were to use the above 6496 * calculation instead, the structures read out of 6497 * /proc/<pid>/xmap would be more difficult to decipher since 6498 * it would be unclear whether two seemingly contiguous 6499 * prxmap_t structures represented different segments or a 6500 * single segment that had been split up into multiple prxmap_t 6501 * structures (e.g. if some part of the segment had not yet 6502 * been faulted in). 6503 */ 6504 nsvd->offset = 0; 6505 } 6506 6507 ASSERT(svd->softlockcnt == 0); 6508 crhold(svd->cred); 6509 6510 if (svd->vpage != NULL) { 6511 size_t bytes = vpgtob(seg_pages(seg)); 6512 size_t nbytes = vpgtob(seg_pages(nseg)); 6513 struct vpage *ovpage = svd->vpage; 6514 6515 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6516 bcopy(ovpage, svd->vpage, bytes); 6517 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6518 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6519 kmem_free(ovpage, bytes + nbytes); 6520 } 6521 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6522 struct anon_map *oamp = svd->amp, *namp; 6523 struct anon_hdr *nahp; 6524 6525 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6526 ASSERT(oamp->refcnt == 1); 6527 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6528 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6529 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6530 6531 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6532 namp->a_szc = nseg->s_szc; 6533 (void) anon_copy_ptr(oamp->ahp, 6534 svd->anon_index + btop(seg->s_size), 6535 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6536 anon_release(oamp->ahp, btop(oamp->size)); 6537 oamp->ahp = nahp; 6538 oamp->size = seg->s_size; 6539 svd->anon_index = 0; 6540 nsvd->amp = namp; 6541 nsvd->anon_index = 0; 6542 ANON_LOCK_EXIT(&oamp->a_rwlock); 6543 } else if (svd->amp != NULL) { 6544 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6545 ASSERT(svd->amp == nsvd->amp); 6546 ASSERT(seg->s_szc <= svd->amp->a_szc); 6547 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6548 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6549 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6550 svd->amp->refcnt++; 6551 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6552 } 6553 6554 /* 6555 * Split amount of swap reserve 6556 */ 6557 if (svd->swresv) { 6558 /* 6559 * For MAP_NORESERVE, only allocate swap reserve for pages 6560 * being used. Other segments get enough to cover whole 6561 * segment. 6562 */ 6563 if (svd->flags & MAP_NORESERVE) { 6564 size_t oswresv; 6565 6566 ASSERT(svd->amp); 6567 oswresv = svd->swresv; 6568 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6569 svd->anon_index, btop(seg->s_size))); 6570 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6571 nsvd->anon_index, btop(nseg->s_size))); 6572 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6573 } else { 6574 ASSERT(svd->swresv == seg->s_size + nseg->s_size); 6575 svd->swresv = seg->s_size; 6576 nsvd->swresv = nseg->s_size; 6577 } 6578 } 6579 6580 return (nseg); 6581 } 6582 6583 /* 6584 * called on memory operations (unmap, setprot, setpagesize) for a subset 6585 * of a large page segment to either demote the memory range (SDR_RANGE) 6586 * or the ends (SDR_END) by addr/len. 6587 * 6588 * returns 0 on success. returns errno, including ENOMEM, on failure. 
6589 */ 6590 static int 6591 segvn_demote_range( 6592 struct seg *seg, 6593 caddr_t addr, 6594 size_t len, 6595 int flag, 6596 uint_t szcvec) 6597 { 6598 caddr_t eaddr = addr + len; 6599 caddr_t lpgaddr, lpgeaddr; 6600 struct seg *nseg; 6601 struct seg *badseg1 = NULL; 6602 struct seg *badseg2 = NULL; 6603 size_t pgsz; 6604 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6605 int err; 6606 uint_t szc = seg->s_szc; 6607 uint_t tszcvec; 6608 6609 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6610 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6611 ASSERT(szc != 0); 6612 pgsz = page_get_pagesize(szc); 6613 ASSERT(seg->s_base != addr || seg->s_size != len); 6614 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6615 ASSERT(svd->softlockcnt == 0); 6616 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6617 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6618 6619 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6620 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6621 if (flag == SDR_RANGE) { 6622 /* demote entire range */ 6623 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6624 (void) segvn_split_seg(nseg, lpgeaddr); 6625 ASSERT(badseg1->s_base == lpgaddr); 6626 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6627 } else if (addr != lpgaddr) { 6628 ASSERT(flag == SDR_END); 6629 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6630 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6631 eaddr < lpgaddr + 2 * pgsz) { 6632 (void) segvn_split_seg(nseg, lpgeaddr); 6633 ASSERT(badseg1->s_base == lpgaddr); 6634 ASSERT(badseg1->s_size == 2 * pgsz); 6635 } else { 6636 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6637 ASSERT(badseg1->s_base == lpgaddr); 6638 ASSERT(badseg1->s_size == pgsz); 6639 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6640 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6641 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6642 badseg2 = nseg; 6643 (void) segvn_split_seg(nseg, lpgeaddr); 6644 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6645 ASSERT(badseg2->s_size == pgsz); 6646 } 6647 } 6648 } else { 6649 ASSERT(flag == SDR_END); 6650 ASSERT(eaddr < lpgeaddr); 6651 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6652 (void) segvn_split_seg(nseg, lpgeaddr); 6653 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6654 ASSERT(badseg1->s_size == pgsz); 6655 } 6656 6657 ASSERT(badseg1 != NULL); 6658 ASSERT(badseg1->s_szc == szc); 6659 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6660 badseg1->s_size == 2 * pgsz); 6661 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6662 ASSERT(badseg1->s_size == pgsz || 6663 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6664 if (err = segvn_clrszc(badseg1)) { 6665 return (err); 6666 } 6667 ASSERT(badseg1->s_szc == 0); 6668 6669 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6670 uint_t tszc = highbit(tszcvec) - 1; 6671 caddr_t ta = MAX(addr, badseg1->s_base); 6672 caddr_t te; 6673 size_t tpgsz = page_get_pagesize(tszc); 6674 6675 ASSERT(svd->type == MAP_SHARED); 6676 ASSERT(flag == SDR_END); 6677 ASSERT(tszc < szc && tszc > 0); 6678 6679 if (eaddr > badseg1->s_base + badseg1->s_size) { 6680 te = badseg1->s_base + badseg1->s_size; 6681 } else { 6682 te = eaddr; 6683 } 6684 6685 ASSERT(ta <= te); 6686 badseg1->s_szc = tszc; 6687 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6688 if (badseg2 != NULL) { 6689 err = segvn_demote_range(badseg1, ta, te - ta, 6690 SDR_END, tszcvec); 6691 if (err != 0) { 6692 return (err); 6693 } 6694 } else { 6695 
return (segvn_demote_range(badseg1, ta, 6696 te - ta, SDR_END, tszcvec)); 6697 } 6698 } 6699 } 6700 6701 if (badseg2 == NULL) 6702 return (0); 6703 ASSERT(badseg2->s_szc == szc); 6704 ASSERT(badseg2->s_size == pgsz); 6705 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6706 if (err = segvn_clrszc(badseg2)) { 6707 return (err); 6708 } 6709 ASSERT(badseg2->s_szc == 0); 6710 6711 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6712 uint_t tszc = highbit(tszcvec) - 1; 6713 size_t tpgsz = page_get_pagesize(tszc); 6714 6715 ASSERT(svd->type == MAP_SHARED); 6716 ASSERT(flag == SDR_END); 6717 ASSERT(tszc < szc && tszc > 0); 6718 ASSERT(badseg2->s_base > addr); 6719 ASSERT(eaddr > badseg2->s_base); 6720 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6721 6722 badseg2->s_szc = tszc; 6723 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6724 return (segvn_demote_range(badseg2, badseg2->s_base, 6725 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6726 } 6727 } 6728 6729 return (0); 6730 } 6731 6732 static int 6733 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6734 { 6735 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6736 struct vpage *vp, *evp; 6737 6738 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6739 6740 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6741 /* 6742 * If segment protection can be used, simply check against them. 6743 */ 6744 if (svd->pageprot == 0) { 6745 int err; 6746 6747 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6748 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6749 return (err); 6750 } 6751 6752 /* 6753 * Have to check down to the vpage level. 6754 */ 6755 evp = &svd->vpage[seg_page(seg, addr + len)]; 6756 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6757 if ((VPP_PROT(vp) & prot) != prot) { 6758 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6759 return (EACCES); 6760 } 6761 } 6762 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6763 return (0); 6764 } 6765 6766 static int 6767 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6768 { 6769 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6770 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6771 6772 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6773 6774 if (pgno != 0) { 6775 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6776 if (svd->pageprot == 0) { 6777 do 6778 protv[--pgno] = svd->prot; 6779 while (pgno != 0); 6780 } else { 6781 size_t pgoff = seg_page(seg, addr); 6782 6783 do { 6784 pgno--; 6785 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6786 } while (pgno != 0); 6787 } 6788 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6789 } 6790 return (0); 6791 } 6792 6793 static u_offset_t 6794 segvn_getoffset(struct seg *seg, caddr_t addr) 6795 { 6796 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6797 6798 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6799 6800 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6801 } 6802 6803 /*ARGSUSED*/ 6804 static int 6805 segvn_gettype(struct seg *seg, caddr_t addr) 6806 { 6807 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6808 6809 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6810 6811 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6812 MAP_INITDATA))); 6813 } 6814 6815 /*ARGSUSED*/ 6816 static int 6817 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6818 { 6819 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6820 6821 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6822 6823 *vpp = svd->vp; 6824 return (0); 6825 } 6826 6827 /* 6828 * Check to see if it makes sense to do kluster/read ahead to 6829 * addr + delta relative to the mapping at addr. We assume here 6830 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6831 * 6832 * For segvn, we currently "approve" of the action if we are 6833 * still in the segment and it maps from the same vp/off, 6834 * or if the advice stored in segvn_data or vpages allows it. 6835 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6836 */ 6837 static int 6838 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6839 { 6840 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6841 struct anon *oap, *ap; 6842 ssize_t pd; 6843 size_t page; 6844 struct vnode *vp1, *vp2; 6845 u_offset_t off1, off2; 6846 struct anon_map *amp; 6847 6848 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6849 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6850 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6851 6852 if (addr + delta < seg->s_base || 6853 addr + delta >= (seg->s_base + seg->s_size)) 6854 return (-1); /* exceeded segment bounds */ 6855 6856 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6857 page = seg_page(seg, addr); 6858 6859 /* 6860 * Check to see if either of the pages addr or addr + delta 6861 * have advice set that prevents klustering (if MADV_RANDOM advice 6862 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6863 * is negative). 6864 */ 6865 if (svd->advice == MADV_RANDOM || 6866 svd->advice == MADV_SEQUENTIAL && delta < 0) 6867 return (-1); 6868 else if (svd->pageadvice && svd->vpage) { 6869 struct vpage *bvpp, *evpp; 6870 6871 bvpp = &svd->vpage[page]; 6872 evpp = &svd->vpage[page + pd]; 6873 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6874 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6875 return (-1); 6876 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6877 VPP_ADVICE(evpp) == MADV_RANDOM) 6878 return (-1); 6879 } 6880 6881 if (svd->type == MAP_SHARED) 6882 return (0); /* shared mapping - all ok */ 6883 6884 if ((amp = svd->amp) == NULL) 6885 return (0); /* off original vnode */ 6886 6887 page += svd->anon_index; 6888 6889 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6890 6891 oap = anon_get_ptr(amp->ahp, page); 6892 ap = anon_get_ptr(amp->ahp, page + pd); 6893 6894 ANON_LOCK_EXIT(&->a_rwlock); 6895 6896 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 6897 return (-1); /* one with and one without an anon */ 6898 } 6899 6900 if (oap == NULL) { /* implies that ap == NULL */ 6901 return (0); /* off original vnode */ 6902 } 6903 6904 /* 6905 * Now we know we have two anon pointers - check to 6906 * see if they happen to be properly allocated. 6907 */ 6908 6909 /* 6910 * XXX We cheat here and don't lock the anon slots. We can't because 6911 * we may have been called from the anon layer which might already 6912 * have locked them. We are holding a refcnt on the slots so they 6913 * can't disappear. The worst that will happen is we'll get the wrong 6914 * names (vp, off) for the slots and make a poor klustering decision. 6915 */ 6916 swap_xlate(ap, &vp1, &off1); 6917 swap_xlate(oap, &vp2, &off2); 6918 6919 6920 if (!VOP_CMP(vp1, vp2) || off1 - off2 != delta) 6921 return (-1); 6922 return (0); 6923 } 6924 6925 /* 6926 * Swap the pages of seg out to secondary storage, returning the 6927 * number of bytes of storage freed. 
6928 * 6929 * The basic idea is first to unload all translations and then to call 6930 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 6931 * swap device. Pages to which other segments have mappings will remain 6932 * mapped and won't be swapped. Our caller (as_swapout) has already 6933 * performed the unloading step. 6934 * 6935 * The value returned is intended to correlate well with the process's 6936 * memory requirements. However, there are some caveats: 6937 * 1) When given a shared segment as argument, this routine will 6938 * only succeed in swapping out pages for the last sharer of the 6939 * segment. (Previous callers will only have decremented mapping 6940 * reference counts.) 6941 * 2) We assume that the hat layer maintains a large enough translation 6942 * cache to capture process reference patterns. 6943 */ 6944 static size_t 6945 segvn_swapout(struct seg *seg) 6946 { 6947 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6948 struct anon_map *amp; 6949 pgcnt_t pgcnt = 0; 6950 pgcnt_t npages; 6951 pgcnt_t page; 6952 ulong_t anon_index; 6953 6954 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6955 6956 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6957 /* 6958 * Find pages unmapped by our caller and force them 6959 * out to the virtual swap device. 6960 */ 6961 if ((amp = svd->amp) != NULL) 6962 anon_index = svd->anon_index; 6963 npages = seg->s_size >> PAGESHIFT; 6964 for (page = 0; page < npages; page++) { 6965 page_t *pp; 6966 struct anon *ap; 6967 struct vnode *vp; 6968 u_offset_t off; 6969 anon_sync_obj_t cookie; 6970 6971 /* 6972 * Obtain <vp, off> pair for the page, then look it up. 6973 * 6974 * Note that this code is willing to consider regular 6975 * pages as well as anon pages. Is this appropriate here? 6976 */ 6977 ap = NULL; 6978 if (amp != NULL) { 6979 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6980 if (anon_array_try_enter(amp, anon_index + page, 6981 &cookie)) { 6982 ANON_LOCK_EXIT(&->a_rwlock); 6983 continue; 6984 } 6985 ap = anon_get_ptr(amp->ahp, anon_index + page); 6986 if (ap != NULL) { 6987 swap_xlate(ap, &vp, &off); 6988 } else { 6989 vp = svd->vp; 6990 off = svd->offset + ptob(page); 6991 } 6992 anon_array_exit(&cookie); 6993 ANON_LOCK_EXIT(&->a_rwlock); 6994 } else { 6995 vp = svd->vp; 6996 off = svd->offset + ptob(page); 6997 } 6998 if (vp == NULL) { /* untouched zfod page */ 6999 ASSERT(ap == NULL); 7000 continue; 7001 } 7002 7003 pp = page_lookup_nowait(vp, off, SE_SHARED); 7004 if (pp == NULL) 7005 continue; 7006 7007 7008 /* 7009 * Examine the page to see whether it can be tossed out, 7010 * keeping track of how many we've found. 7011 */ 7012 if (!page_tryupgrade(pp)) { 7013 /* 7014 * If the page has an i/o lock and no mappings, 7015 * it's very likely that the page is being 7016 * written out as a result of klustering. 7017 * Assume this is so and take credit for it here. 7018 */ 7019 if (!page_io_trylock(pp)) { 7020 if (!hat_page_is_mapped(pp)) 7021 pgcnt++; 7022 } else { 7023 page_io_unlock(pp); 7024 } 7025 page_unlock(pp); 7026 continue; 7027 } 7028 ASSERT(!page_iolock_assert(pp)); 7029 7030 7031 /* 7032 * Skip if page is locked or has mappings. 7033 * We don't need the page_struct_lock to look at lckcnt 7034 * and cowcnt because the page is exclusive locked. 7035 */ 7036 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7037 hat_page_is_mapped(pp)) { 7038 page_unlock(pp); 7039 continue; 7040 } 7041 7042 /* 7043 * dispose skips large pages so try to demote first. 
7044 */ 7045 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7046 page_unlock(pp); 7047 /* 7048 * XXX should skip the remaining page_t's of this 7049 * large page. 7050 */ 7051 continue; 7052 } 7053 7054 ASSERT(pp->p_szc == 0); 7055 7056 /* 7057 * No longer mapped -- we can toss it out. How 7058 * we do so depends on whether or not it's dirty. 7059 */ 7060 if (hat_ismod(pp) && pp->p_vnode) { 7061 /* 7062 * We must clean the page before it can be 7063 * freed. Setting B_FREE will cause pvn_done 7064 * to free the page when the i/o completes. 7065 * XXX: This also causes it to be accounted 7066 * as a pageout instead of a swap: need 7067 * B_SWAPOUT bit to use instead of B_FREE. 7068 * 7069 * Hold the vnode before releasing the page lock 7070 * to prevent it from being freed and re-used by 7071 * some other thread. 7072 */ 7073 VN_HOLD(vp); 7074 page_unlock(pp); 7075 7076 /* 7077 * Queue all i/o requests for the pageout thread 7078 * to avoid saturating the pageout devices. 7079 */ 7080 if (!queue_io_request(vp, off)) 7081 VN_RELE(vp); 7082 } else { 7083 /* 7084 * The page was clean, free it. 7085 * 7086 * XXX: Can we ever encounter modified pages 7087 * with no associated vnode here? 7088 */ 7089 ASSERT(pp->p_vnode != NULL); 7090 /*LINTED: constant in conditional context*/ 7091 VN_DISPOSE(pp, B_FREE, 0, kcred); 7092 } 7093 7094 /* 7095 * Credit now even if i/o is in progress. 7096 */ 7097 pgcnt++; 7098 } 7099 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7100 7101 /* 7102 * Wakeup pageout to initiate i/o on all queued requests. 7103 */ 7104 cv_signal_pageout(); 7105 return (ptob(pgcnt)); 7106 } 7107 7108 /* 7109 * Synchronize primary storage cache with real object in virtual memory. 7110 * 7111 * XXX - Anonymous pages should not be sync'ed out at all. 7112 */ 7113 static int 7114 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7115 { 7116 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7117 struct vpage *vpp; 7118 page_t *pp; 7119 u_offset_t offset; 7120 struct vnode *vp; 7121 u_offset_t off; 7122 caddr_t eaddr; 7123 int bflags; 7124 int err = 0; 7125 int segtype; 7126 int pageprot; 7127 int prot; 7128 ulong_t anon_index; 7129 struct anon_map *amp; 7130 struct anon *ap; 7131 anon_sync_obj_t cookie; 7132 7133 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7134 7135 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7136 7137 if (svd->softlockcnt > 0) { 7138 /* 7139 * flush all pages from seg cache 7140 * otherwise we may deadlock in swap_putpage 7141 * for B_INVAL page (4175402). 7142 * 7143 * Even if we grab segvn WRITER's lock or segp_slock 7144 * here, there might be another thread which could've 7145 * successfully performed lookup/insert just before 7146 * we acquired the lock here. So, grabbing either 7147 * lock here is of not much use. Until we devise 7148 * a strategy at upper layers to solve the 7149 * synchronization issues completely, we expect 7150 * applications to handle this appropriately. 7151 */ 7152 segvn_purge(seg); 7153 if (svd->softlockcnt > 0) { 7154 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7155 return (EAGAIN); 7156 } 7157 } 7158 7159 vpp = svd->vpage; 7160 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7161 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7162 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7163 7164 if (attr) { 7165 pageprot = attr & ~(SHARED|PRIVATE); 7166 segtype = (attr & SHARED) ? 
MAP_SHARED : MAP_PRIVATE; 7167 7168 /* 7169 * We are done if the segment types don't match 7170 * or if we have segment level protections and 7171 * they don't match. 7172 */ 7173 if (svd->type != segtype) { 7174 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7175 return (0); 7176 } 7177 if (vpp == NULL) { 7178 if (svd->prot != pageprot) { 7179 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7180 return (0); 7181 } 7182 prot = svd->prot; 7183 } else 7184 vpp = &svd->vpage[seg_page(seg, addr)]; 7185 7186 } else if (svd->vp && svd->amp == NULL && 7187 (flags & MS_INVALIDATE) == 0) { 7188 7189 /* 7190 * No attributes, no anonymous pages and MS_INVALIDATE flag 7191 * is not on, just use one big request. 7192 */ 7193 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7194 bflags, svd->cred); 7195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7196 return (err); 7197 } 7198 7199 if ((amp = svd->amp) != NULL) 7200 anon_index = svd->anon_index + seg_page(seg, addr); 7201 7202 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7203 ap = NULL; 7204 if (amp != NULL) { 7205 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7206 anon_array_enter(amp, anon_index, &cookie); 7207 ap = anon_get_ptr(amp->ahp, anon_index++); 7208 if (ap != NULL) { 7209 swap_xlate(ap, &vp, &off); 7210 } else { 7211 vp = svd->vp; 7212 off = offset; 7213 } 7214 anon_array_exit(&cookie); 7215 ANON_LOCK_EXIT(&->a_rwlock); 7216 } else { 7217 vp = svd->vp; 7218 off = offset; 7219 } 7220 offset += PAGESIZE; 7221 7222 if (vp == NULL) /* untouched zfod page */ 7223 continue; 7224 7225 if (attr) { 7226 if (vpp) { 7227 prot = VPP_PROT(vpp); 7228 vpp++; 7229 } 7230 if (prot != pageprot) { 7231 continue; 7232 } 7233 } 7234 7235 /* 7236 * See if any of these pages are locked -- if so, then we 7237 * will have to truncate an invalidate request at the first 7238 * locked one. We don't need the page_struct_lock to test 7239 * as this is only advisory; even if we acquire it someone 7240 * might race in and lock the page after we unlock and before 7241 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7242 */ 7243 if (flags & MS_INVALIDATE) { 7244 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7245 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7246 page_unlock(pp); 7247 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7248 return (EBUSY); 7249 } 7250 if (ap != NULL && pp->p_szc != 0 && 7251 page_tryupgrade(pp)) { 7252 if (pp->p_lckcnt == 0 && 7253 pp->p_cowcnt == 0) { 7254 /* 7255 * swapfs VN_DISPOSE() won't 7256 * invalidate large pages. 7257 * Attempt to demote. 7258 * XXX can't help it if it 7259 * fails. But for swapfs 7260 * pages it is no big deal. 7261 */ 7262 (void) page_try_demote_pages( 7263 pp); 7264 } 7265 } 7266 page_unlock(pp); 7267 } 7268 } else if (svd->type == MAP_SHARED && amp != NULL) { 7269 /* 7270 * Avoid writting out to disk ISM's large pages 7271 * because segspt_free_pages() relies on NULL an_pvp 7272 * of anon slots of such pages. 7273 */ 7274 7275 ASSERT(svd->vp == NULL); 7276 /* 7277 * swapfs uses page_lookup_nowait if not freeing or 7278 * invalidating and skips a page if 7279 * page_lookup_nowait returns NULL. 7280 */ 7281 pp = page_lookup_nowait(vp, off, SE_SHARED); 7282 if (pp == NULL) { 7283 continue; 7284 } 7285 if (pp->p_szc != 0) { 7286 page_unlock(pp); 7287 continue; 7288 } 7289 7290 /* 7291 * Note ISM pages are created large so (vp, off)'s 7292 * page cannot suddenly become large after we unlock 7293 * pp. 
7294 */ 7295 page_unlock(pp); 7296 } 7297 /* 7298 * XXX - Should ultimately try to kluster 7299 * calls to VOP_PUTPAGE() for performance. 7300 */ 7301 VN_HOLD(vp); 7302 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7303 bflags, svd->cred); 7304 VN_RELE(vp); 7305 if (err) 7306 break; 7307 } 7308 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7309 return (err); 7310 } 7311 7312 /* 7313 * Determine if we have data corresponding to pages in the 7314 * primary storage virtual memory cache (i.e., "in core"). 7315 */ 7316 static size_t 7317 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7318 { 7319 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7320 struct vnode *vp, *avp; 7321 u_offset_t offset, aoffset; 7322 size_t p, ep; 7323 int ret; 7324 struct vpage *vpp; 7325 page_t *pp; 7326 uint_t start; 7327 struct anon_map *amp; /* XXX - for locknest */ 7328 struct anon *ap; 7329 uint_t attr; 7330 anon_sync_obj_t cookie; 7331 7332 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7333 7334 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7335 if (svd->amp == NULL && svd->vp == NULL) { 7336 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7337 bzero(vec, btopr(len)); 7338 return (len); /* no anonymous pages created yet */ 7339 } 7340 7341 p = seg_page(seg, addr); 7342 ep = seg_page(seg, addr + len); 7343 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7344 7345 amp = svd->amp; 7346 for (; p < ep; p++, addr += PAGESIZE) { 7347 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7348 ret = start; 7349 ap = NULL; 7350 avp = NULL; 7351 /* Grab the vnode/offset for the anon slot */ 7352 if (amp != NULL) { 7353 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7354 anon_array_enter(amp, svd->anon_index + p, &cookie); 7355 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7356 if (ap != NULL) { 7357 swap_xlate(ap, &avp, &aoffset); 7358 } 7359 anon_array_exit(&cookie); 7360 ANON_LOCK_EXIT(&->a_rwlock); 7361 } 7362 if ((avp != NULL) && page_exists(avp, aoffset)) { 7363 /* A page exists for the anon slot */ 7364 ret |= SEG_PAGE_INCORE; 7365 7366 /* 7367 * If page is mapped and writable 7368 */ 7369 attr = (uint_t)0; 7370 if ((hat_getattr(seg->s_as->a_hat, addr, 7371 &attr) != -1) && (attr & PROT_WRITE)) { 7372 ret |= SEG_PAGE_ANON; 7373 } 7374 /* 7375 * Don't get page_struct lock for lckcnt and cowcnt, 7376 * since this is purely advisory. 7377 */ 7378 if ((pp = page_lookup_nowait(avp, aoffset, 7379 SE_SHARED)) != NULL) { 7380 if (pp->p_lckcnt) 7381 ret |= SEG_PAGE_SOFTLOCK; 7382 if (pp->p_cowcnt) 7383 ret |= SEG_PAGE_HASCOW; 7384 page_unlock(pp); 7385 } 7386 } 7387 7388 /* Gather vnode statistics */ 7389 vp = svd->vp; 7390 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7391 7392 if (vp != NULL) { 7393 /* 7394 * Try to obtain a "shared" lock on the page 7395 * without blocking. If this fails, determine 7396 * if the page is in memory. 7397 */ 7398 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7399 if ((pp == NULL) && (page_exists(vp, offset))) { 7400 /* Page is incore, and is named */ 7401 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7402 } 7403 /* 7404 * Don't get page_struct lock for lckcnt and cowcnt, 7405 * since this is purely advisory. 
7406 */ 7407 if (pp != NULL) { 7408 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7409 if (pp->p_lckcnt) 7410 ret |= SEG_PAGE_SOFTLOCK; 7411 if (pp->p_cowcnt) 7412 ret |= SEG_PAGE_HASCOW; 7413 page_unlock(pp); 7414 } 7415 } 7416 7417 /* Gather virtual page information */ 7418 if (vpp) { 7419 if (VPP_ISPPLOCK(vpp)) 7420 ret |= SEG_PAGE_LOCKED; 7421 vpp++; 7422 } 7423 7424 *vec++ = (char)ret; 7425 } 7426 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7427 return (len); 7428 } 7429 7430 /* 7431 * Statement for p_cowcnts/p_lckcnts. 7432 * 7433 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7434 * irrespective of the following factors or anything else: 7435 * 7436 * (1) anon slots are populated or not 7437 * (2) cow is broken or not 7438 * (3) refcnt on ap is 1 or greater than 1 7439 * 7440 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7441 * and munlock. 7442 * 7443 * 7444 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7445 * 7446 * if vpage has PROT_WRITE 7447 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7448 * else 7449 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7450 * 7451 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7452 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7453 * 7454 * We may also break COW if softlocking on read access in the physio case. 7455 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7456 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7457 * vpage doesn't have PROT_WRITE. 7458 * 7459 * 7460 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7461 * 7462 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7463 * increment p_lckcnt by calling page_subclaim() which takes care of 7464 * availrmem accounting and p_lckcnt overflow. 7465 * 7466 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7467 * increment p_cowcnt by calling page_addclaim() which takes care of 7468 * availrmem availability and p_cowcnt overflow. 7469 */ 7470 7471 /* 7472 * Lock down (or unlock) pages mapped by this segment. 7473 * 7474 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7475 * At fault time they will be relocated into larger pages. 
7476 */ 7477 static int 7478 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7479 int attr, int op, ulong_t *lockmap, size_t pos) 7480 { 7481 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7482 struct vpage *vpp; 7483 struct vpage *evp; 7484 page_t *pp; 7485 u_offset_t offset; 7486 u_offset_t off; 7487 int segtype; 7488 int pageprot; 7489 int claim; 7490 struct vnode *vp; 7491 ulong_t anon_index; 7492 struct anon_map *amp; 7493 struct anon *ap; 7494 struct vattr va; 7495 anon_sync_obj_t cookie; 7496 struct kshmid *sp = NULL; 7497 struct proc *p = curproc; 7498 kproject_t *proj = NULL; 7499 int chargeproc = 1; 7500 size_t locked_bytes = 0; 7501 size_t unlocked_bytes = 0; 7502 int err = 0; 7503 7504 /* 7505 * Hold write lock on address space because may split or concatenate 7506 * segments 7507 */ 7508 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7509 7510 /* 7511 * If this is a shm, use shm's project and zone, else use 7512 * project and zone of calling process 7513 */ 7514 7515 /* Determine if this segment backs a sysV shm */ 7516 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7517 ASSERT(svd->type == MAP_SHARED); 7518 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7519 sp = svd->amp->a_sp; 7520 proj = sp->shm_perm.ipc_proj; 7521 chargeproc = 0; 7522 } 7523 7524 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7525 if (attr) { 7526 pageprot = attr & ~(SHARED|PRIVATE); 7527 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7528 7529 /* 7530 * We are done if the segment types don't match 7531 * or if we have segment level protections and 7532 * they don't match. 7533 */ 7534 if (svd->type != segtype) { 7535 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7536 return (0); 7537 } 7538 if (svd->pageprot == 0 && svd->prot != pageprot) { 7539 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7540 return (0); 7541 } 7542 } 7543 7544 if (op == MC_LOCK) { 7545 if (svd->tr_state == SEGVN_TR_INIT) { 7546 svd->tr_state = SEGVN_TR_OFF; 7547 } else if (svd->tr_state == SEGVN_TR_ON) { 7548 ASSERT(svd->amp != NULL); 7549 segvn_textunrepl(seg, 0); 7550 ASSERT(svd->amp == NULL && 7551 svd->tr_state == SEGVN_TR_OFF); 7552 } 7553 } 7554 7555 /* 7556 * If we're locking, then we must create a vpage structure if 7557 * none exists. If we're unlocking, then check to see if there 7558 * is a vpage -- if not, then we could not have locked anything. 7559 */ 7560 7561 if ((vpp = svd->vpage) == NULL) { 7562 if (op == MC_LOCK) 7563 segvn_vpage(seg); 7564 else { 7565 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7566 return (0); 7567 } 7568 } 7569 7570 /* 7571 * The anonymous data vector (i.e., previously 7572 * unreferenced mapping to swap space) can be allocated 7573 * by lazily testing for its existence. 
7574 */ 7575 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7576 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7577 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7578 svd->amp->a_szc = seg->s_szc; 7579 } 7580 7581 if ((amp = svd->amp) != NULL) { 7582 anon_index = svd->anon_index + seg_page(seg, addr); 7583 } 7584 7585 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7586 evp = &svd->vpage[seg_page(seg, addr + len)]; 7587 7588 if (sp != NULL) 7589 mutex_enter(&sp->shm_mlock); 7590 7591 /* determine number of unlocked bytes in range for lock operation */ 7592 if (op == MC_LOCK) { 7593 7594 if (sp == NULL) { 7595 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7596 vpp++) { 7597 if (!VPP_ISPPLOCK(vpp)) 7598 unlocked_bytes += PAGESIZE; 7599 } 7600 } else { 7601 ulong_t i_idx, i_edx; 7602 anon_sync_obj_t i_cookie; 7603 struct anon *i_ap; 7604 struct vnode *i_vp; 7605 u_offset_t i_off; 7606 7607 /* Only count sysV pages once for locked memory */ 7608 i_edx = svd->anon_index + seg_page(seg, addr + len); 7609 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7610 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7611 anon_array_enter(amp, i_idx, &i_cookie); 7612 i_ap = anon_get_ptr(amp->ahp, i_idx); 7613 if (i_ap == NULL) { 7614 unlocked_bytes += PAGESIZE; 7615 anon_array_exit(&i_cookie); 7616 continue; 7617 } 7618 swap_xlate(i_ap, &i_vp, &i_off); 7619 anon_array_exit(&i_cookie); 7620 pp = page_lookup(i_vp, i_off, SE_SHARED); 7621 if (pp == NULL) { 7622 unlocked_bytes += PAGESIZE; 7623 continue; 7624 } else if (pp->p_lckcnt == 0) 7625 unlocked_bytes += PAGESIZE; 7626 page_unlock(pp); 7627 } 7628 ANON_LOCK_EXIT(&->a_rwlock); 7629 } 7630 7631 mutex_enter(&p->p_lock); 7632 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7633 chargeproc); 7634 mutex_exit(&p->p_lock); 7635 7636 if (err) { 7637 if (sp != NULL) 7638 mutex_exit(&sp->shm_mlock); 7639 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7640 return (err); 7641 } 7642 } 7643 /* 7644 * Loop over all pages in the range. Process if we're locking and 7645 * page has not already been locked in this mapping; or if we're 7646 * unlocking and the page has been locked. 7647 */ 7648 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7649 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7650 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7651 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7652 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7653 7654 if (amp != NULL) 7655 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7656 /* 7657 * If this isn't a MAP_NORESERVE segment and 7658 * we're locking, allocate anon slots if they 7659 * don't exist. The page is brought in later on. 7660 */ 7661 if (op == MC_LOCK && svd->vp == NULL && 7662 ((svd->flags & MAP_NORESERVE) == 0) && 7663 amp != NULL && 7664 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7665 == NULL)) { 7666 anon_array_enter(amp, anon_index, &cookie); 7667 7668 if ((ap = anon_get_ptr(amp->ahp, 7669 anon_index)) == NULL) { 7670 pp = anon_zero(seg, addr, &ap, 7671 svd->cred); 7672 if (pp == NULL) { 7673 anon_array_exit(&cookie); 7674 ANON_LOCK_EXIT(&->a_rwlock); 7675 err = ENOMEM; 7676 goto out; 7677 } 7678 ASSERT(anon_get_ptr(amp->ahp, 7679 anon_index) == NULL); 7680 (void) anon_set_ptr(amp->ahp, 7681 anon_index, ap, ANON_SLEEP); 7682 page_unlock(pp); 7683 } 7684 anon_array_exit(&cookie); 7685 } 7686 7687 /* 7688 * Get name for page, accounting for 7689 * existence of private copy. 
7690 */ 7691 ap = NULL; 7692 if (amp != NULL) { 7693 anon_array_enter(amp, anon_index, &cookie); 7694 ap = anon_get_ptr(amp->ahp, anon_index); 7695 if (ap != NULL) { 7696 swap_xlate(ap, &vp, &off); 7697 } else { 7698 if (svd->vp == NULL && 7699 (svd->flags & MAP_NORESERVE)) { 7700 anon_array_exit(&cookie); 7701 ANON_LOCK_EXIT(&->a_rwlock); 7702 continue; 7703 } 7704 vp = svd->vp; 7705 off = offset; 7706 } 7707 anon_array_exit(&cookie); 7708 ANON_LOCK_EXIT(&->a_rwlock); 7709 } else { 7710 vp = svd->vp; 7711 off = offset; 7712 } 7713 7714 /* 7715 * Get page frame. It's ok if the page is 7716 * not available when we're unlocking, as this 7717 * may simply mean that a page we locked got 7718 * truncated out of existence after we locked it. 7719 * 7720 * Invoke VOP_GETPAGE() to obtain the page struct 7721 * since we may need to read it from disk if its 7722 * been paged out. 7723 */ 7724 if (op != MC_LOCK) 7725 pp = page_lookup(vp, off, SE_SHARED); 7726 else { 7727 page_t *pl[1 + 1]; 7728 int error; 7729 7730 ASSERT(vp != NULL); 7731 7732 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7733 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7734 S_OTHER, svd->cred); 7735 7736 /* 7737 * If the error is EDEADLK then we must bounce 7738 * up and drop all vm subsystem locks and then 7739 * retry the operation later 7740 * This behavior is a temporary measure because 7741 * ufs/sds logging is badly designed and will 7742 * deadlock if we don't allow this bounce to 7743 * happen. The real solution is to re-design 7744 * the logging code to work properly. See bug 7745 * 4125102 for details of the problem. 7746 */ 7747 if (error == EDEADLK) { 7748 err = error; 7749 goto out; 7750 } 7751 /* 7752 * Quit if we fail to fault in the page. Treat 7753 * the failure as an error, unless the addr 7754 * is mapped beyond the end of a file. 7755 */ 7756 if (error && svd->vp) { 7757 va.va_mask = AT_SIZE; 7758 if (VOP_GETATTR(svd->vp, &va, 0, 7759 svd->cred) != 0) { 7760 err = EIO; 7761 goto out; 7762 } 7763 if (btopr(va.va_size) >= 7764 btopr(off + 1)) { 7765 err = EIO; 7766 goto out; 7767 } 7768 goto out; 7769 7770 } else if (error) { 7771 err = EIO; 7772 goto out; 7773 } 7774 pp = pl[0]; 7775 ASSERT(pp != NULL); 7776 } 7777 7778 /* 7779 * See Statement at the beginning of this routine. 7780 * 7781 * claim is always set if MAP_PRIVATE and PROT_WRITE 7782 * irrespective of following factors: 7783 * 7784 * (1) anon slots are populated or not 7785 * (2) cow is broken or not 7786 * (3) refcnt on ap is 1 or greater than 1 7787 * 7788 * See 4140683 for details 7789 */ 7790 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7791 (svd->type == MAP_PRIVATE)); 7792 7793 /* 7794 * Perform page-level operation appropriate to 7795 * operation. If locking, undo the SOFTLOCK 7796 * performed to bring the page into memory 7797 * after setting the lock. If unlocking, 7798 * and no page was found, account for the claim 7799 * separately. 
7800 */
7801 if (op == MC_LOCK) {
7802 int ret = 1; /* Assume success */
7803
7804 ASSERT(!VPP_ISPPLOCK(vpp));
7805
7806 ret = page_pp_lock(pp, claim, 0);
7807 if (ret == 0) {
7808 /* locking page failed */
7809 page_unlock(pp);
7810 err = EAGAIN;
7811 goto out;
7812 }
7813 VPP_SETPPLOCK(vpp);
7814 if (sp != NULL) {
7815 if (pp->p_lckcnt == 1)
7816 locked_bytes += PAGESIZE;
7817 } else
7818 locked_bytes += PAGESIZE;
7819
7820 if (lockmap != (ulong_t *)NULL)
7821 BT_SET(lockmap, pos);
7822
7823 page_unlock(pp);
7824 } else {
7825 ASSERT(VPP_ISPPLOCK(vpp));
7826 if (pp != NULL) {
7827 /* sysV pages should be locked */
7828 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7829 page_pp_unlock(pp, claim, 0);
7830 if (sp != NULL) {
7831 if (pp->p_lckcnt == 0)
7832 unlocked_bytes
7833 += PAGESIZE;
7834 } else
7835 unlocked_bytes += PAGESIZE;
7836 page_unlock(pp);
7837 } else {
7838 ASSERT(sp == NULL);
7839 unlocked_bytes += PAGESIZE;
7840 }
7841 VPP_CLRPPLOCK(vpp);
7842 }
7843 }
7844 }
7845 out:
7846 if (op == MC_LOCK) {
7847 /* Credit back bytes that did not get locked */
7848 if ((unlocked_bytes - locked_bytes) > 0) {
7849 if (proj == NULL)
7850 mutex_enter(&p->p_lock);
7851 rctl_decr_locked_mem(p, proj,
7852 (unlocked_bytes - locked_bytes), chargeproc);
7853 if (proj == NULL)
7854 mutex_exit(&p->p_lock);
7855 }
7856
7857 } else {
7858 /* Account bytes that were unlocked */
7859 if (unlocked_bytes > 0) {
7860 if (proj == NULL)
7861 mutex_enter(&p->p_lock);
7862 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7863 chargeproc);
7864 if (proj == NULL)
7865 mutex_exit(&p->p_lock);
7866 }
7867 }
7868 if (sp != NULL)
7869 mutex_exit(&sp->shm_mlock);
7870 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7871
7872 return (err);
7873 }
7874
7875 /*
7876 * Set advice from user for specified pages
7877 * There are 9 types of advice:
7878 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7879 * MADV_RANDOM - Random page references
7880 * do not allow readahead or 'klustering'
7881 * MADV_SEQUENTIAL - Sequential page references
7882 * Pages previous to the one currently being
7883 * accessed (determined by fault) are 'not needed'
7884 * and are freed immediately
7885 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7886 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7887 * MADV_FREE - Contents can be discarded
7888 * MADV_ACCESS_DEFAULT- Default access
7889 * MADV_ACCESS_LWP - Next LWP will access heavily
7890 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
7891 */
7892 static int
7893 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
7894 {
7895 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7896 size_t page;
7897 int err = 0;
7898 int already_set;
7899 struct anon_map *amp;
7900 ulong_t anon_index;
7901 struct seg *next;
7902 lgrp_mem_policy_t policy;
7903 struct seg *prev;
7904 struct vnode *vp;
7905
7906 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7907
7908 /*
7909 * In case of MADV_FREE, we won't be modifying any segment private
7910 * data structures; so, we only need to grab READER's lock
7911 */
7912 if (behav != MADV_FREE) {
7913 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7914 if (svd->tr_state != SEGVN_TR_OFF) {
7915 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7916 return (0);
7917 }
7918 } else {
7919 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7920 }
7921
7922 /*
7923 * Large pages are assumed to be only turned on when accesses to the
7924 * segment's address range have spatial and temporal
locality. That
7925 * justifies ignoring MADV_SEQUENTIAL for large page segments.
7926 * Also, ignore advice affecting lgroup memory allocation
7927 * if we don't need to do lgroup optimizations on this system
7928 */
7929
7930 if ((behav == MADV_SEQUENTIAL &&
7931 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
7932 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
7933 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
7934 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7935 return (0);
7936 }
7937
7938 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
7939 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
7940 /*
7941 * Since we are going to unload hat mappings
7942 * we first have to flush the cache. Otherwise
7943 * this might lead to system panic if another
7944 * thread is doing physio on the range whose
7945 * mappings are unloaded by madvise(3C).
7946 */
7947 if (svd->softlockcnt > 0) {
7948 /*
7949 * Since we do have the segvn writers lock
7950 * nobody can fill the cache with entries
7951 * belonging to this seg during the purge.
7952 * The flush either succeeds or we still
7953 * have pending I/Os. In the latter case,
7954 * madvise(3C) fails.
7955 */
7956 segvn_purge(seg);
7957 if (svd->softlockcnt > 0) {
7958 /*
7959 * Since madvise(3C) is advisory and
7960 * it's not part of UNIX98, madvise(3C)
7961 * failure here doesn't cause any hardship.
7962 * Note that we don't block in "as" layer.
7963 */
7964 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7965 return (EAGAIN);
7966 }
7967 }
7968 }
7969
7970 amp = svd->amp;
7971 vp = svd->vp;
7972 if (behav == MADV_FREE) {
7973 /*
7974 * MADV_FREE is not supported for segments with
7975 * underlying object; if anonmap is NULL, anon slots
7976 * are not yet populated and there is nothing for
7977 * us to do. As MADV_FREE is advisory, we don't
7978 * return error in either case.
7979 */
7980 if (vp != NULL || amp == NULL) {
7981 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7982 return (0);
7983 }
7984
7985 page = seg_page(seg, addr);
7986 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7987 anon_disclaim(amp, svd->anon_index + page, len, 0);
7988 ANON_LOCK_EXIT(&amp->a_rwlock);
7989 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7990 return (0);
7991 }
7992
7993 /*
7994 * If advice is to be applied to entire segment,
7995 * use advice field in seg_data structure;
7996 * otherwise use appropriate vpage entry.
7997 */
7998 if ((addr == seg->s_base) && (len == seg->s_size)) {
7999 switch (behav) {
8000 case MADV_ACCESS_LWP:
8001 case MADV_ACCESS_MANY:
8002 case MADV_ACCESS_DEFAULT:
8003 /*
8004 * Set memory allocation policy for this segment
8005 */
8006 policy = lgrp_madv_to_policy(behav, len, svd->type);
8007 if (svd->type == MAP_SHARED)
8008 already_set = lgrp_shm_policy_set(policy, amp,
8009 svd->anon_index, vp, svd->offset, len);
8010 else {
8011 /*
8012 * For private memory, need writers lock on
8013 * address space because the segment may be
8014 * split or concatenated when changing policy
8015 */
8016 if (AS_READ_HELD(seg->s_as,
8017 &seg->s_as->a_lock)) {
8018 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8019 return (IE_RETRY);
8020 }
8021
8022 already_set = lgrp_privm_policy_set(policy,
8023 &svd->policy_info, len);
8024 }
8025
8026 /*
8027 * If policy set already and it shouldn't be reapplied,
8028 * don't do anything.
8029 */ 8030 if (already_set && 8031 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8032 break; 8033 8034 /* 8035 * Mark any existing pages in given range for 8036 * migration 8037 */ 8038 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8039 vp, svd->offset, 1); 8040 8041 /* 8042 * If same policy set already or this is a shared 8043 * memory segment, don't need to try to concatenate 8044 * segment with adjacent ones. 8045 */ 8046 if (already_set || svd->type == MAP_SHARED) 8047 break; 8048 8049 /* 8050 * Try to concatenate this segment with previous 8051 * one and next one, since we changed policy for 8052 * this one and it may be compatible with adjacent 8053 * ones now. 8054 */ 8055 prev = AS_SEGPREV(seg->s_as, seg); 8056 next = AS_SEGNEXT(seg->s_as, seg); 8057 8058 if (next && next->s_ops == &segvn_ops && 8059 addr + len == next->s_base) 8060 (void) segvn_concat(seg, next, 1); 8061 8062 if (prev && prev->s_ops == &segvn_ops && 8063 addr == prev->s_base + prev->s_size) { 8064 /* 8065 * Drop lock for private data of current 8066 * segment before concatenating (deleting) it 8067 * and return IE_REATTACH to tell as_ctl() that 8068 * current segment has changed 8069 */ 8070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8071 if (!segvn_concat(prev, seg, 1)) 8072 err = IE_REATTACH; 8073 8074 return (err); 8075 } 8076 break; 8077 8078 case MADV_SEQUENTIAL: 8079 /* 8080 * unloading mapping guarantees 8081 * detection in segvn_fault 8082 */ 8083 ASSERT(seg->s_szc == 0); 8084 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8085 hat_unload(seg->s_as->a_hat, addr, len, 8086 HAT_UNLOAD); 8087 /* FALLTHROUGH */ 8088 case MADV_NORMAL: 8089 case MADV_RANDOM: 8090 svd->advice = (uchar_t)behav; 8091 svd->pageadvice = 0; 8092 break; 8093 case MADV_WILLNEED: /* handled in memcntl */ 8094 case MADV_DONTNEED: /* handled in memcntl */ 8095 case MADV_FREE: /* handled above */ 8096 break; 8097 default: 8098 err = EINVAL; 8099 } 8100 } else { 8101 caddr_t eaddr; 8102 struct seg *new_seg; 8103 struct segvn_data *new_svd; 8104 u_offset_t off; 8105 caddr_t oldeaddr; 8106 8107 page = seg_page(seg, addr); 8108 8109 segvn_vpage(seg); 8110 8111 switch (behav) { 8112 struct vpage *bvpp, *evpp; 8113 8114 case MADV_ACCESS_LWP: 8115 case MADV_ACCESS_MANY: 8116 case MADV_ACCESS_DEFAULT: 8117 /* 8118 * Set memory allocation policy for portion of this 8119 * segment 8120 */ 8121 8122 /* 8123 * Align address and length of advice to page 8124 * boundaries for large pages 8125 */ 8126 if (seg->s_szc != 0) { 8127 size_t pgsz; 8128 8129 pgsz = page_get_pagesize(seg->s_szc); 8130 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8131 len = P2ROUNDUP(len, pgsz); 8132 } 8133 8134 /* 8135 * Check to see whether policy is set already 8136 */ 8137 policy = lgrp_madv_to_policy(behav, len, svd->type); 8138 8139 anon_index = svd->anon_index + page; 8140 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8141 8142 if (svd->type == MAP_SHARED) 8143 already_set = lgrp_shm_policy_set(policy, amp, 8144 anon_index, vp, off, len); 8145 else 8146 already_set = 8147 (policy == svd->policy_info.mem_policy); 8148 8149 /* 8150 * If policy set already and it shouldn't be reapplied, 8151 * don't do anything. 
8152 */ 8153 if (already_set && 8154 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8155 break; 8156 8157 /* 8158 * For private memory, need writers lock on 8159 * address space because the segment may be 8160 * split or concatenated when changing policy 8161 */ 8162 if (svd->type == MAP_PRIVATE && 8163 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8164 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8165 return (IE_RETRY); 8166 } 8167 8168 /* 8169 * Mark any existing pages in given range for 8170 * migration 8171 */ 8172 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8173 vp, svd->offset, 1); 8174 8175 /* 8176 * Don't need to try to split or concatenate 8177 * segments, since policy is same or this is a shared 8178 * memory segment 8179 */ 8180 if (already_set || svd->type == MAP_SHARED) 8181 break; 8182 8183 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8184 ASSERT(svd->amp == NULL); 8185 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8186 ASSERT(svd->softlockcnt == 0); 8187 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8188 HAT_REGION_TEXT); 8189 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8190 } 8191 8192 /* 8193 * Split off new segment if advice only applies to a 8194 * portion of existing segment starting in middle 8195 */ 8196 new_seg = NULL; 8197 eaddr = addr + len; 8198 oldeaddr = seg->s_base + seg->s_size; 8199 if (addr > seg->s_base) { 8200 /* 8201 * Must flush I/O page cache 8202 * before splitting segment 8203 */ 8204 if (svd->softlockcnt > 0) 8205 segvn_purge(seg); 8206 8207 /* 8208 * Split segment and return IE_REATTACH to tell 8209 * as_ctl() that current segment changed 8210 */ 8211 new_seg = segvn_split_seg(seg, addr); 8212 new_svd = (struct segvn_data *)new_seg->s_data; 8213 err = IE_REATTACH; 8214 8215 /* 8216 * If new segment ends where old one 8217 * did, try to concatenate the new 8218 * segment with next one. 8219 */ 8220 if (eaddr == oldeaddr) { 8221 /* 8222 * Set policy for new segment 8223 */ 8224 (void) lgrp_privm_policy_set(policy, 8225 &new_svd->policy_info, 8226 new_seg->s_size); 8227 8228 next = AS_SEGNEXT(new_seg->s_as, 8229 new_seg); 8230 8231 if (next && 8232 next->s_ops == &segvn_ops && 8233 eaddr == next->s_base) 8234 (void) segvn_concat(new_seg, 8235 next, 1); 8236 } 8237 } 8238 8239 /* 8240 * Split off end of existing segment if advice only 8241 * applies to a portion of segment ending before 8242 * end of the existing segment 8243 */ 8244 if (eaddr < oldeaddr) { 8245 /* 8246 * Must flush I/O page cache 8247 * before splitting segment 8248 */ 8249 if (svd->softlockcnt > 0) 8250 segvn_purge(seg); 8251 8252 /* 8253 * If beginning of old segment was already 8254 * split off, use new segment to split end off 8255 * from. 8256 */ 8257 if (new_seg != NULL && new_seg != seg) { 8258 /* 8259 * Split segment 8260 */ 8261 (void) segvn_split_seg(new_seg, eaddr); 8262 8263 /* 8264 * Set policy for new segment 8265 */ 8266 (void) lgrp_privm_policy_set(policy, 8267 &new_svd->policy_info, 8268 new_seg->s_size); 8269 } else { 8270 /* 8271 * Split segment and return IE_REATTACH 8272 * to tell as_ctl() that current 8273 * segment changed 8274 */ 8275 (void) segvn_split_seg(seg, eaddr); 8276 err = IE_REATTACH; 8277 8278 (void) lgrp_privm_policy_set(policy, 8279 &svd->policy_info, seg->s_size); 8280 8281 /* 8282 * If new segment starts where old one 8283 * did, try to concatenate it with 8284 * previous segment. 
8285 */
8286 if (addr == seg->s_base) {
8287 prev = AS_SEGPREV(seg->s_as,
8288 seg);
8289
8290 /*
8291 * Drop lock for private data
8292 * of current segment before
8293 * concatenating (deleting) it
8294 */
8295 if (prev &&
8296 prev->s_ops ==
8297 &segvn_ops &&
8298 addr == prev->s_base +
8299 prev->s_size) {
8300 SEGVN_LOCK_EXIT(
8301 seg->s_as,
8302 &svd->lock);
8303 (void) segvn_concat(
8304 prev, seg, 1);
8305 return (err);
8306 }
8307 }
8308 }
8309 }
8310 break;
8311 case MADV_SEQUENTIAL:
8312 ASSERT(seg->s_szc == 0);
8313 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8314 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8315 /* FALLTHROUGH */
8316 case MADV_NORMAL:
8317 case MADV_RANDOM:
8318 bvpp = &svd->vpage[page];
8319 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8320 for (; bvpp < evpp; bvpp++)
8321 VPP_SETADVICE(bvpp, behav);
8322 svd->advice = MADV_NORMAL;
8323 break;
8324 case MADV_WILLNEED: /* handled in memcntl */
8325 case MADV_DONTNEED: /* handled in memcntl */
8326 case MADV_FREE: /* handled above */
8327 break;
8328 default:
8329 err = EINVAL;
8330 }
8331 }
8332 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8333 return (err);
8334 }
8335
8336 /*
8337 * Create a vpage structure for this seg.
8338 */
8339 static void
8340 segvn_vpage(struct seg *seg)
8341 {
8342 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8343 struct vpage *vp, *evp;
8344
8345 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8346
8347 /*
8348 * If no vpage structure exists, allocate one. Copy the protections
8349 * and the advice from the segment itself to the individual pages.
8350 */
8351 if (svd->vpage == NULL) {
8352 svd->pageadvice = 1;
8353 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage),
8354 KM_SLEEP);
8355 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8356 for (vp = svd->vpage; vp < evp; vp++) {
8357 VPP_SETPROT(vp, svd->prot);
8358 VPP_SETADVICE(vp, svd->advice);
8359 }
8360 }
8361 }
8362
8363 /*
8364 * Dump the pages belonging to this segvn segment.
8365 */
8366 static void
8367 segvn_dump(struct seg *seg)
8368 {
8369 struct segvn_data *svd;
8370 page_t *pp;
8371 struct anon_map *amp;
8372 ulong_t anon_index;
8373 struct vnode *vp;
8374 u_offset_t off, offset;
8375 pfn_t pfn;
8376 pgcnt_t page, npages;
8377 caddr_t addr;
8378
8379 npages = seg_pages(seg);
8380 svd = (struct segvn_data *)seg->s_data;
8381 vp = svd->vp;
8382 off = offset = svd->offset;
8383 addr = seg->s_base;
8384
8385 if ((amp = svd->amp) != NULL) {
8386 anon_index = svd->anon_index;
8387 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8388 }
8389
8390 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8391 struct anon *ap;
8392 int we_own_it = 0;
8393
8394 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8395 swap_xlate_nopanic(ap, &vp, &off);
8396 } else {
8397 vp = svd->vp;
8398 off = offset;
8399 }
8400
8401 /*
8402 * If pp == NULL, the page either does not exist
8403 * or is exclusively locked. So determine if it
8404 * exists before searching for it.
8405 */
8406
8407 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8408 we_own_it = 1;
8409 else
8410 pp = page_exists(vp, off);
8411
8412 if (pp) {
8413 pfn = page_pptonum(pp);
8414 dump_addpage(seg->s_as, addr, pfn);
8415 if (we_own_it)
8416 page_unlock(pp);
8417 }
8418 addr += PAGESIZE;
8419 dump_timeleft = dump_timeout;
8420 }
8421
8422 if (amp != NULL)
8423 ANON_LOCK_EXIT(&amp->a_rwlock);
8424 }
8425
8426 /*
8427 * lock/unlock anon pages over a given range.
Return shadow list 8428 */ 8429 static int 8430 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8431 enum lock_type type, enum seg_rw rw) 8432 { 8433 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8434 size_t np, adjustpages = 0, npages = (len >> PAGESHIFT); 8435 ulong_t anon_index; 8436 uint_t protchk; 8437 uint_t error; 8438 struct anon_map *amp; 8439 struct page **pplist, **pl, *pp; 8440 caddr_t a; 8441 size_t page; 8442 caddr_t lpgaddr, lpgeaddr; 8443 pgcnt_t szc0_npages = 0; 8444 8445 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8446 "segvn_pagelock: start seg %p addr %p", seg, addr); 8447 8448 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8449 if (seg->s_szc != 0 && (type == L_PAGELOCK || type == L_PAGEUNLOCK)) { 8450 /* 8451 * We are adjusting the pagelock region to the large page size 8452 * boundary because the unlocked part of a large page cannot 8453 * be freed anyway unless all constituent pages of a large 8454 * page are locked. Therefore this adjustment allows us to 8455 * decrement availrmem by the right value (note we don't want 8456 * to just decrement availrem by the large page size without 8457 * adjusting addr and len because then we may end up 8458 * decrementing availrmem by large page size for every 8459 * constituent page locked by a new as_pagelock call). 8460 * as_pageunlock caller must always match as_pagelock call's 8461 * addr and len. 8462 * 8463 * Note segment's page size cannot change while we are holding 8464 * as lock. And then it cannot change while softlockcnt is 8465 * not 0. This will allow us to correctly recalculate large 8466 * page size region for the matching pageunlock/reclaim call. 8467 * 8468 * for pageunlock *ppp points to the pointer of page_t that 8469 * corresponds to the real unadjusted start address. Similar 8470 * for pagelock *ppp must point to the pointer of page_t that 8471 * corresponds to the real unadjusted start address. 8472 */ 8473 size_t pgsz = page_get_pagesize(seg->s_szc); 8474 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8475 adjustpages = ((uintptr_t)(addr - lpgaddr)) >> PAGESHIFT; 8476 } 8477 8478 if (type == L_PAGEUNLOCK) { 8479 8480 /* 8481 * update hat ref bits for /proc. We need to make sure 8482 * that threads tracing the ref and mod bits of the 8483 * address space get the right data. 8484 * Note: page ref and mod bits are updated at reclaim time 8485 */ 8486 if (seg->s_as->a_vbits) { 8487 for (a = addr; a < addr + len; a += PAGESIZE) { 8488 if (rw == S_WRITE) { 8489 hat_setstat(seg->s_as, a, 8490 PAGESIZE, P_REF | P_MOD); 8491 } else { 8492 hat_setstat(seg->s_as, a, 8493 PAGESIZE, P_REF); 8494 } 8495 } 8496 } 8497 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8498 if (seg->s_szc != 0) { 8499 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8500 seg_pinactive(seg, lpgaddr, lpgeaddr - lpgaddr, 8501 *ppp - adjustpages, rw, segvn_reclaim); 8502 } else { 8503 seg_pinactive(seg, addr, len, *ppp, rw, segvn_reclaim); 8504 } 8505 8506 /* 8507 * If someone is blocked while unmapping, we purge 8508 * segment page cache and thus reclaim pplist synchronously 8509 * without waiting for seg_pasync_thread. This speeds up 8510 * unmapping in cases where munmap(2) is called, while 8511 * raw async i/o is still in progress or where a thread 8512 * exits on data fault in a multithreaded application. 
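 * (segvn_purge() is simply a wrapper around seg_ppurge(seg), so the
 * cached shadow lists are reclaimed right here rather than by
 * seg_pasync_thread.)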
8513 */ 8514 if (AS_ISUNMAPWAIT(seg->s_as) && (svd->softlockcnt > 0)) { 8515 /* 8516 * Even if we grab segvn WRITER's lock or segp_slock 8517 * here, there might be another thread which could've 8518 * successfully performed lookup/insert just before 8519 * we acquired the lock here. So, grabbing either 8520 * lock here is of not much use. Until we devise 8521 * a strategy at upper layers to solve the 8522 * synchronization issues completely, we expect 8523 * applications to handle this appropriately. 8524 */ 8525 segvn_purge(seg); 8526 } 8527 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8528 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8529 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8530 return (0); 8531 } else if (type == L_PAGERECLAIM) { 8532 VM_STAT_COND_ADD(seg->s_szc != 0, segvnvmstats.pagelock[1]); 8533 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8534 (void) segvn_reclaim(seg, addr, len, *ppp, rw); 8535 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8536 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8537 "segvn_pagelock: reclaim seg %p addr %p", seg, addr); 8538 return (0); 8539 } 8540 8541 if (seg->s_szc != 0) { 8542 VM_STAT_ADD(segvnvmstats.pagelock[2]); 8543 addr = lpgaddr; 8544 len = lpgeaddr - lpgaddr; 8545 npages = (len >> PAGESHIFT); 8546 } 8547 8548 /* 8549 * for now we only support pagelock to anon memory. We've to check 8550 * protections for vnode objects and call into the vnode driver. 8551 * That's too much for a fast path. Let the fault entry point handle it. 8552 */ 8553 if (svd->vp != NULL) { 8554 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8555 "segvn_pagelock: mapped vnode seg %p addr %p", seg, addr); 8556 *ppp = NULL; 8557 return (ENOTSUP); 8558 } 8559 8560 /* 8561 * if anonmap is not yet created, let the fault entry point populate it 8562 * with anon ptrs. 8563 */ 8564 if ((amp = svd->amp) == NULL) { 8565 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8566 "segvn_pagelock: anonmap null seg %p addr %p", seg, addr); 8567 *ppp = NULL; 8568 return (EFAULT); 8569 } 8570 8571 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8572 8573 /* 8574 * we acquire segp_slock to prevent duplicate entries 8575 * in seg_pcache 8576 */ 8577 mutex_enter(&svd->segp_slock); 8578 8579 /* 8580 * try to find pages in segment page cache 8581 */ 8582 pplist = seg_plookup(seg, addr, len, rw); 8583 if (pplist != NULL) { 8584 mutex_exit(&svd->segp_slock); 8585 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8586 *ppp = pplist + adjustpages; 8587 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8588 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8589 return (0); 8590 } 8591 8592 if (rw == S_READ) { 8593 protchk = PROT_READ; 8594 } else { 8595 protchk = PROT_WRITE; 8596 } 8597 8598 if (svd->pageprot == 0) { 8599 if ((svd->prot & protchk) == 0) { 8600 mutex_exit(&svd->segp_slock); 8601 error = EFAULT; 8602 goto out; 8603 } 8604 } else { 8605 /* 8606 * check page protections 8607 */ 8608 for (a = addr; a < addr + len; a += PAGESIZE) { 8609 struct vpage *vp; 8610 8611 vp = &svd->vpage[seg_page(seg, a)]; 8612 if ((VPP_PROT(vp) & protchk) == 0) { 8613 mutex_exit(&svd->segp_slock); 8614 error = EFAULT; 8615 goto out; 8616 } 8617 } 8618 } 8619 8620 /* 8621 * Avoid per page overhead of segvn_slock_anonpages() for small 8622 * pages. For large pages segvn_slock_anonpages() only does real 8623 * work once per large page. The tradeoff is that we may decrement 8624 * availrmem more than once for the same page but this is ok 8625 * for small pages. 
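 * Hence, for seg->s_szc == 0 the code below charges availrmem for the
 * whole request up front under freemem_lock and later credits back any
 * pages that ended up going through segvn_slock_anonpages(), i.e. those
 * not counted in szc0_npages.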
8626 */
8627 if (seg->s_szc == 0) {
8628 mutex_enter(&freemem_lock);
8629 if (availrmem < tune.t_minarmem + npages) {
8630 mutex_exit(&freemem_lock);
8631 mutex_exit(&svd->segp_slock);
8632 error = ENOMEM;
8633 goto out;
8634 }
8635 availrmem -= npages;
8636 mutex_exit(&freemem_lock);
8637 }
8638
8639 pplist = kmem_alloc(sizeof (page_t *) * npages, KM_SLEEP);
8640 pl = pplist;
8641 *ppp = pplist + adjustpages;
8642
8643 page = seg_page(seg, addr);
8644 anon_index = svd->anon_index + page;
8645
8646 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8647 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
8648 struct anon *ap;
8649 struct vnode *vp;
8650 u_offset_t off;
8651 anon_sync_obj_t cookie;
8652
8653 anon_array_enter(amp, anon_index, &cookie);
8654 ap = anon_get_ptr(amp->ahp, anon_index);
8655 if (ap == NULL) {
8656 anon_array_exit(&cookie);
8657 break;
8658 } else {
8659 /*
8660 * We must never use seg_pcache for COW pages
8661 * because we might end up with original page still
8662 * lying in seg_pcache even after private page is
8663 * created. This leads to data corruption as
8664 * aio_write refers to the page still in cache
8665 * while all other accesses refer to the private
8666 * page.
8667 */
8668 if (ap->an_refcnt != 1) {
8669 anon_array_exit(&cookie);
8670 break;
8671 }
8672 }
8673 swap_xlate(ap, &vp, &off);
8674 anon_array_exit(&cookie);
8675
8676 pp = page_lookup_nowait(vp, off, SE_SHARED);
8677 if (pp == NULL) {
8678 break;
8679 }
8680 if (seg->s_szc != 0 || pp->p_szc != 0) {
8681 if (!segvn_slock_anonpages(pp, a == addr)) {
8682 page_unlock(pp);
8683 break;
8684 }
8685 } else {
8686 szc0_npages++;
8687 }
8688 *pplist++ = pp;
8689 }
8690 ANON_LOCK_EXIT(&amp->a_rwlock);
8691
8692 ASSERT(npages >= szc0_npages);
8693
8694 if (a >= addr + len) {
8695 mutex_enter(&freemem_lock);
8696 if (seg->s_szc == 0 && npages != szc0_npages) {
8697 ASSERT(svd->type == MAP_SHARED && amp->a_szc > 0);
8698 availrmem += (npages - szc0_npages);
8699 }
8700 svd->softlockcnt += npages;
8701 segvn_pages_locked += npages;
8702 mutex_exit(&freemem_lock);
8703 (void) seg_pinsert(seg, addr, len, pl, rw, SEGP_ASYNC_FLUSH,
8704 segvn_reclaim);
8705 mutex_exit(&svd->segp_slock);
8706 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8707 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
8708 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
8709 return (0);
8710 }
8711
8712 mutex_exit(&svd->segp_slock);
8713 if (seg->s_szc == 0) {
8714 mutex_enter(&freemem_lock);
8715 availrmem += npages;
8716 mutex_exit(&freemem_lock);
8717 }
8718 error = EFAULT;
8719 pplist = pl;
8720 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
8721 while (np > (uint_t)0) {
8722 ASSERT(PAGE_LOCKED(*pplist));
8723 if (seg->s_szc != 0 || (*pplist)->p_szc != 0) {
8724 segvn_sunlock_anonpages(*pplist, pplist == pl);
8725 }
8726 page_unlock(*pplist);
8727 np--;
8728 pplist++;
8729 }
8730 kmem_free(pl, sizeof (page_t *) * npages);
8731 out:
8732 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8733 *ppp = NULL;
8734 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
8735 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
8736 return (error);
8737 }
8738
8739 /*
8740 * purge any cached pages in the I/O page cache
8741 */
8742 static void
8743 segvn_purge(struct seg *seg)
8744 {
8745 seg_ppurge(seg);
8746 }
8747
8748 static int
8749 segvn_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist,
8750 enum seg_rw rw)
8751 {
8752 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8753 pgcnt_t np, npages;
8754 struct page **pl;
8755
pgcnt_t szc0_npages = 0;
8756
8757 #ifdef lint
8758 addr = addr;
8759 #endif
8760
8761 npages = np = (len >> PAGESHIFT);
8762 ASSERT(npages);
8763 pl = pplist;
8764 if (seg->s_szc != 0) {
8765 size_t pgsz = page_get_pagesize(seg->s_szc);
8766 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
8767 panic("segvn_reclaim: unaligned addr or len");
8768 /*NOTREACHED*/
8769 }
8770 }
8771
8772 ASSERT(svd->vp == NULL && svd->amp != NULL);
8773
8774 while (np > (uint_t)0) {
8775 if (rw == S_WRITE) {
8776 hat_setrefmod(*pplist);
8777 } else {
8778 hat_setref(*pplist);
8779 }
8780 if (seg->s_szc != 0 || (*pplist)->p_szc != 0) {
8781 segvn_sunlock_anonpages(*pplist, pplist == pl);
8782 } else {
8783 szc0_npages++;
8784 }
8785 page_unlock(*pplist);
8786 np--;
8787 pplist++;
8788 }
8789 kmem_free(pl, sizeof (page_t *) * npages);
8790
8791 mutex_enter(&freemem_lock);
8792 segvn_pages_locked -= npages;
8793 svd->softlockcnt -= npages;
8794 if (szc0_npages != 0) {
8795 availrmem += szc0_npages;
8796 }
8797 mutex_exit(&freemem_lock);
8798 if (svd->softlockcnt <= 0) {
8799 if (AS_ISUNMAPWAIT(seg->s_as)) {
8800 mutex_enter(&seg->s_as->a_contents);
8801 if (AS_ISUNMAPWAIT(seg->s_as)) {
8802 AS_CLRUNMAPWAIT(seg->s_as);
8803 cv_broadcast(&seg->s_as->a_cv);
8804 }
8805 mutex_exit(&seg->s_as->a_contents);
8806 }
8807 }
8808 return (0);
8809 }
8810 /*
8811 * get a memory ID for an addr in a given segment
8812 *
8813 * XXX only creates PAGESIZE pages if anon slots are not initialized.
8814 * At fault time they will be relocated into larger pages.
8815 */
8816 static int
8817 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
8818 {
8819 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8820 struct anon *ap = NULL;
8821 ulong_t anon_index;
8822 struct anon_map *amp;
8823 anon_sync_obj_t cookie;
8824
8825 if (svd->type == MAP_PRIVATE) {
8826 memidp->val[0] = (uintptr_t)seg->s_as;
8827 memidp->val[1] = (uintptr_t)addr;
8828 return (0);
8829 }
8830
8831 if (svd->type == MAP_SHARED) {
8832 if (svd->vp) {
8833 memidp->val[0] = (uintptr_t)svd->vp;
8834 memidp->val[1] = (u_longlong_t)svd->offset +
8835 (uintptr_t)(addr - seg->s_base);
8836 return (0);
8837 } else {
8838
8839 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8840 if ((amp = svd->amp) != NULL) {
8841 anon_index = svd->anon_index +
8842 seg_page(seg, addr);
8843 }
8844 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8845
8846 ASSERT(amp != NULL);
8847
8848 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8849 anon_array_enter(amp, anon_index, &cookie);
8850 ap = anon_get_ptr(amp->ahp, anon_index);
8851 if (ap == NULL) {
8852 page_t *pp;
8853
8854 pp = anon_zero(seg, addr, &ap, svd->cred);
8855 if (pp == NULL) {
8856 anon_array_exit(&cookie);
8857 ANON_LOCK_EXIT(&amp->a_rwlock);
8858 return (ENOMEM);
8859 }
8860 ASSERT(anon_get_ptr(amp->ahp, anon_index)
8861 == NULL);
8862 (void) anon_set_ptr(amp->ahp, anon_index,
8863 ap, ANON_SLEEP);
8864 page_unlock(pp);
8865 }
8866
8867 anon_array_exit(&cookie);
8868 ANON_LOCK_EXIT(&amp->a_rwlock);
8869
8870 memidp->val[0] = (uintptr_t)ap;
8871 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
8872 return (0);
8873 }
8874 }
8875 return (EINVAL);
8876 }
8877
8878 static int
8879 sameprot(struct seg *seg, caddr_t a, size_t len)
8880 {
8881 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8882 struct vpage *vpage;
8883 spgcnt_t pages = btop(len);
8884 uint_t prot;
8885
8886 if (svd->pageprot == 0)
8887 return (1);
8888
8889 ASSERT(svd->vpage != NULL);
8890
8891 vpage = &svd->vpage[seg_page(seg, a)];
8892 prot
= VPP_PROT(vpage); 8893 vpage++; 8894 pages--; 8895 while (pages-- > 0) { 8896 if (prot != VPP_PROT(vpage)) 8897 return (0); 8898 vpage++; 8899 } 8900 return (1); 8901 } 8902 8903 /* 8904 * Get memory allocation policy info for specified address in given segment 8905 */ 8906 static lgrp_mem_policy_info_t * 8907 segvn_getpolicy(struct seg *seg, caddr_t addr) 8908 { 8909 struct anon_map *amp; 8910 ulong_t anon_index; 8911 lgrp_mem_policy_info_t *policy_info; 8912 struct segvn_data *svn_data; 8913 u_offset_t vn_off; 8914 vnode_t *vp; 8915 8916 ASSERT(seg != NULL); 8917 8918 svn_data = (struct segvn_data *)seg->s_data; 8919 if (svn_data == NULL) 8920 return (NULL); 8921 8922 /* 8923 * Get policy info for private or shared memory 8924 */ 8925 if (svn_data->type != MAP_SHARED) { 8926 if (svn_data->tr_state != SEGVN_TR_ON) { 8927 policy_info = &svn_data->policy_info; 8928 } else { 8929 policy_info = &svn_data->tr_policy_info; 8930 ASSERT(policy_info->mem_policy == 8931 LGRP_MEM_POLICY_NEXT_SEG); 8932 } 8933 } else { 8934 amp = svn_data->amp; 8935 anon_index = svn_data->anon_index + seg_page(seg, addr); 8936 vp = svn_data->vp; 8937 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 8938 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 8939 } 8940 8941 return (policy_info); 8942 } 8943 8944 /*ARGSUSED*/ 8945 static int 8946 segvn_capable(struct seg *seg, segcapability_t capability) 8947 { 8948 return (0); 8949 } 8950 8951 /* 8952 * Bind text vnode segment to an amp. If we bind successfully mappings will be 8953 * established to per vnode mapping per lgroup amp pages instead of to vnode 8954 * pages. There's one amp per vnode text mapping per lgroup. Many processes 8955 * may share the same text replication amp. If a suitable amp doesn't already 8956 * exist in svntr hash table create a new one. We may fail to bind to amp if 8957 * segment is not eligible for text replication. Code below first checks for 8958 * these conditions. If binding is successful segment tr_state is set to on 8959 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 8960 * svd->amp remains as NULL. 8961 */ 8962 static void 8963 segvn_textrepl(struct seg *seg) 8964 { 8965 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8966 vnode_t *vp = svd->vp; 8967 u_offset_t off = svd->offset; 8968 size_t size = seg->s_size; 8969 u_offset_t eoff = off + size; 8970 uint_t szc = seg->s_szc; 8971 ulong_t hash = SVNTR_HASH_FUNC(vp); 8972 svntr_t *svntrp; 8973 struct vattr va; 8974 proc_t *p = seg->s_as->a_proc; 8975 lgrp_id_t lgrp_id; 8976 lgrp_id_t olid; 8977 int first; 8978 struct anon_map *amp; 8979 8980 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8981 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8982 ASSERT(p != NULL); 8983 ASSERT(svd->tr_state == SEGVN_TR_INIT); 8984 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 8985 ASSERT(svd->flags & MAP_TEXT); 8986 ASSERT(svd->type == MAP_PRIVATE); 8987 ASSERT(vp != NULL && svd->amp == NULL); 8988 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 8989 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 8990 ASSERT(seg->s_as != &kas); 8991 ASSERT(off < eoff); 8992 ASSERT(svntr_hashtab != NULL); 8993 8994 /* 8995 * If numa optimizations are no longer desired bail out. 8996 */ 8997 if (!lgrp_optimizations()) { 8998 svd->tr_state = SEGVN_TR_OFF; 8999 return; 9000 } 9001 9002 /* 9003 * Avoid creating anon maps with size bigger than the file size. 9004 * If VOP_GETATTR() call fails bail out. 
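 * (The check below is roughly: set va.va_mask = AT_SIZE | AT_MTIME,
 * call VOP_GETATTR(vp, &va, 0, svd->cred), and reject the binding when
 * btopr(va.va_size) < btopr(eoff).)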
9005 */ 9006 va.va_mask = AT_SIZE | AT_MTIME; 9007 if (VOP_GETATTR(vp, &va, 0, svd->cred) != 0) { 9008 svd->tr_state = SEGVN_TR_OFF; 9009 SEGVN_TR_ADDSTAT(gaerr); 9010 return; 9011 } 9012 if (btopr(va.va_size) < btopr(eoff)) { 9013 svd->tr_state = SEGVN_TR_OFF; 9014 SEGVN_TR_ADDSTAT(overmap); 9015 return; 9016 } 9017 9018 /* 9019 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9020 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9021 * mapping that checks if trcache for this vnode needs to be 9022 * invalidated can't miss us. 9023 */ 9024 if (!(vp->v_flag & VVMEXEC)) { 9025 mutex_enter(&vp->v_lock); 9026 vp->v_flag |= VVMEXEC; 9027 mutex_exit(&vp->v_lock); 9028 } 9029 mutex_enter(&svntr_hashtab[hash].tr_lock); 9030 /* 9031 * Bail out if potentially MAP_SHARED writable mappings exist to this 9032 * vnode. We don't want to use old file contents from existing 9033 * replicas if this mapping was established after the original file 9034 * was changed. 9035 */ 9036 if (vn_is_mapped(vp, V_WRITE)) { 9037 mutex_exit(&svntr_hashtab[hash].tr_lock); 9038 svd->tr_state = SEGVN_TR_OFF; 9039 SEGVN_TR_ADDSTAT(wrcnt); 9040 return; 9041 } 9042 svntrp = svntr_hashtab[hash].tr_head; 9043 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9044 ASSERT(svntrp->tr_refcnt != 0); 9045 if (svntrp->tr_vp != vp) { 9046 continue; 9047 } 9048 /* 9049 * Bail out if file was changed after this replication entry 9050 * was created since we need to use the latest file contents. 9051 */ 9052 if (!svntrp->tr_valid || 9053 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9054 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec) { 9055 mutex_exit(&svntr_hashtab[hash].tr_lock); 9056 svd->tr_state = SEGVN_TR_OFF; 9057 SEGVN_TR_ADDSTAT(stale); 9058 return; 9059 } 9060 /* 9061 * if off, eoff and szc match current segment we found the 9062 * existing entry we can use. 9063 */ 9064 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9065 svntrp->tr_szc == szc) { 9066 break; 9067 } 9068 /* 9069 * Don't create different but overlapping in file offsets 9070 * entries to avoid replication of the same file pages more 9071 * than once per lgroup. 9072 */ 9073 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9074 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9075 mutex_exit(&svntr_hashtab[hash].tr_lock); 9076 svd->tr_state = SEGVN_TR_OFF; 9077 SEGVN_TR_ADDSTAT(overlap); 9078 return; 9079 } 9080 } 9081 /* 9082 * If we didn't find existing entry create a new one. 9083 */ 9084 if (svntrp == NULL) { 9085 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9086 if (svntrp == NULL) { 9087 mutex_exit(&svntr_hashtab[hash].tr_lock); 9088 svd->tr_state = SEGVN_TR_OFF; 9089 SEGVN_TR_ADDSTAT(nokmem); 9090 return; 9091 } 9092 #ifdef DEBUG 9093 { 9094 lgrp_id_t i; 9095 for (i = 0; i < NLGRPS_MAX; i++) { 9096 ASSERT(svntrp->tr_amp[i] == NULL); 9097 } 9098 } 9099 #endif /* DEBUG */ 9100 svntrp->tr_vp = vp; 9101 svntrp->tr_off = off; 9102 svntrp->tr_eoff = eoff; 9103 svntrp->tr_szc = szc; 9104 svntrp->tr_valid = 1; 9105 svntrp->tr_mtime = va.va_mtime; 9106 svntrp->tr_refcnt = 0; 9107 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9108 svntr_hashtab[hash].tr_head = svntrp; 9109 } 9110 first = 1; 9111 again: 9112 /* 9113 * We want to pick a replica with pages on main thread's (t_tid = 1, 9114 * aka T1) lgrp. Currently text replication is only optimized for 9115 * workloads that either have all threads of a process on the same 9116 * lgrp or execute their large text primarily on main thread. 
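 * The selection below is simply lgrp_id = p->p_t1_lgrpid, falling back
 * to lgrp_home_id(curthread) when T1's home lgroup is not known yet.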
9117 */
9118 lgrp_id = p->p_t1_lgrpid;
9119 if (lgrp_id == LGRP_NONE) {
9120 /*
9121 * In case exec() prefaults text on non main thread use
9122 * current thread lgrpid. It will become main thread anyway
9123 * soon.
9124 */
9125 lgrp_id = lgrp_home_id(curthread);
9126 }
9127 /*
9128 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9129 * just set it to NLGRPS_MAX if it's different from current process T1
9130 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9131 * replication and T1 new home is different from lgrp used for text
9132 * replication. When this happens the asynchronous segvn thread rechecks
9133 * if segments should change lgrps used for text replication. If we fail
9134 * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas
9135 * if it's not already NLGRPS_MAX and not equal to the lgrp_id we want to
9136 * use. We don't need to use cas in this case because another thread
9137 * that races in between our non atomic check and set may only change
9138 * p_tr_lgrpid to NLGRPS_MAX at this point.
9139 */
9140 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9141 olid = p->p_tr_lgrpid;
9142 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9143 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9144 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) {
9145 olid = p->p_tr_lgrpid;
9146 ASSERT(olid != LGRP_NONE);
9147 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9148 p->p_tr_lgrpid = NLGRPS_MAX;
9149 }
9150 }
9151 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9152 membar_producer();
9153 /*
9154 * lgrp_move_thread() won't schedule async recheck after
9155 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9156 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9157 * is not LGRP_NONE.
9158 */
9159 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9160 p->p_t1_lgrpid != lgrp_id) {
9161 first = 0;
9162 goto again;
9163 }
9164 }
9165 /*
9166 * If no amp was created yet for lgrp_id create a new one as long as
9167 * we have enough memory to afford it.
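 * "Afford" here means that the atomic bump of segvn_textrepl_bytes stays
 * within segvn_textrepl_max_bytes and anon_try_resv_zone() succeeds; any
 * failure undoes the charge at the fail: label below.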
9168 */ 9169 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9170 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9171 if (trmem > segvn_textrepl_max_bytes) { 9172 SEGVN_TR_ADDSTAT(normem); 9173 goto fail; 9174 } 9175 if (anon_try_resv_zone(size, NULL) == 0) { 9176 SEGVN_TR_ADDSTAT(noanon); 9177 goto fail; 9178 } 9179 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9180 if (amp == NULL) { 9181 anon_unresv_zone(size, NULL); 9182 SEGVN_TR_ADDSTAT(nokmem); 9183 goto fail; 9184 } 9185 ASSERT(amp->refcnt == 1); 9186 amp->a_szc = szc; 9187 svntrp->tr_amp[lgrp_id] = amp; 9188 SEGVN_TR_ADDSTAT(newamp); 9189 } 9190 svntrp->tr_refcnt++; 9191 ASSERT(svd->svn_trnext == NULL); 9192 ASSERT(svd->svn_trprev == NULL); 9193 svd->svn_trnext = svntrp->tr_svnhead; 9194 svd->svn_trprev = NULL; 9195 if (svntrp->tr_svnhead != NULL) { 9196 svntrp->tr_svnhead->svn_trprev = svd; 9197 } 9198 svntrp->tr_svnhead = svd; 9199 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9200 ASSERT(amp->refcnt >= 1); 9201 svd->amp = amp; 9202 svd->anon_index = 0; 9203 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9204 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9205 svd->tr_state = SEGVN_TR_ON; 9206 mutex_exit(&svntr_hashtab[hash].tr_lock); 9207 SEGVN_TR_ADDSTAT(repl); 9208 return; 9209 fail: 9210 ASSERT(segvn_textrepl_bytes >= size); 9211 atomic_add_long(&segvn_textrepl_bytes, -size); 9212 ASSERT(svntrp != NULL); 9213 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9214 if (svntrp->tr_refcnt == 0) { 9215 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9216 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9217 mutex_exit(&svntr_hashtab[hash].tr_lock); 9218 kmem_cache_free(svntr_cache, svntrp); 9219 } else { 9220 mutex_exit(&svntr_hashtab[hash].tr_lock); 9221 } 9222 svd->tr_state = SEGVN_TR_OFF; 9223 } 9224 9225 /* 9226 * Convert seg back to regular vnode mapping seg by unbinding it from its text 9227 * replication amp. This routine is most typically called when segment is 9228 * unmapped but can also be called when segment no longer qualifies for text 9229 * replication (e.g. due to protection changes). If unload_unmap is set use 9230 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of 9231 * svntr free all its anon maps and remove it from the hash table. 
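 * In outline: the segment is unlinked from the svntr entry's svd list,
 * and only when tr_refcnt drops to zero is the entry removed from the
 * hash bucket and each per-lgroup amp freed via anon_free() or
 * anon_free_pages(), anon_unresv_zone() and anonmap_free().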
9232 */
9233 static void
9234 segvn_textunrepl(struct seg *seg, int unload_unmap)
9235 {
9236 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9237 vnode_t *vp = svd->vp;
9238 u_offset_t off = svd->offset;
9239 size_t size = seg->s_size;
9240 u_offset_t eoff = off + size;
9241 uint_t szc = seg->s_szc;
9242 ulong_t hash = SVNTR_HASH_FUNC(vp);
9243 svntr_t *svntrp;
9244 svntr_t **prv_svntrp;
9245 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9246 lgrp_id_t i;
9247
9248 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9249 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9250 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9251 ASSERT(svd->tr_state == SEGVN_TR_ON);
9252 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9253 ASSERT(svd->amp != NULL);
9254 ASSERT(svd->amp->refcnt >= 1);
9255 ASSERT(svd->anon_index == 0);
9256 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9257 ASSERT(svntr_hashtab != NULL);
9258
9259 mutex_enter(&svntr_hashtab[hash].tr_lock);
9260 prv_svntrp = &svntr_hashtab[hash].tr_head;
9261 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9262 ASSERT(svntrp->tr_refcnt != 0);
9263 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9264 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9265 break;
9266 }
9267 }
9268 if (svntrp == NULL) {
9269 panic("segvn_textunrepl: svntr record not found");
9270 }
9271 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9272 panic("segvn_textunrepl: amp mismatch");
9273 }
9274 svd->tr_state = SEGVN_TR_OFF;
9275 svd->amp = NULL;
9276 if (svd->svn_trprev == NULL) {
9277 ASSERT(svntrp->tr_svnhead == svd);
9278 svntrp->tr_svnhead = svd->svn_trnext;
9279 if (svntrp->tr_svnhead != NULL) {
9280 svntrp->tr_svnhead->svn_trprev = NULL;
9281 }
9282 svd->svn_trnext = NULL;
9283 } else {
9284 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9285 if (svd->svn_trnext != NULL) {
9286 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9287 svd->svn_trnext = NULL;
9288 }
9289 svd->svn_trprev = NULL;
9290 }
9291 if (--svntrp->tr_refcnt) {
9292 mutex_exit(&svntr_hashtab[hash].tr_lock);
9293 goto done;
9294 }
9295 *prv_svntrp = svntrp->tr_next;
9296 mutex_exit(&svntr_hashtab[hash].tr_lock);
9297 for (i = 0; i < NLGRPS_MAX; i++) {
9298 struct anon_map *amp = svntrp->tr_amp[i];
9299 if (amp == NULL) {
9300 continue;
9301 }
9302 ASSERT(amp->refcnt == 1);
9303 ASSERT(amp->swresv == size);
9304 ASSERT(amp->size == size);
9305 ASSERT(amp->a_szc == szc);
9306 if (amp->a_szc != 0) {
9307 anon_free_pages(amp->ahp, 0, size, szc);
9308 } else {
9309 anon_free(amp->ahp, 0, size);
9310 }
9311 svntrp->tr_amp[i] = NULL;
9312 ASSERT(segvn_textrepl_bytes >= size);
9313 atomic_add_long(&segvn_textrepl_bytes, -size);
9314 anon_unresv_zone(amp->swresv, NULL);
9315 amp->refcnt = 0;
9316 anonmap_free(amp);
9317 }
9318 kmem_cache_free(svntr_cache, svntrp);
9319 done:
9320 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9321 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9322 }
9323
9324 /*
9325 * This is called when a MAP_SHARED writable mapping is created to a vnode
9326 * that is currently used for execution (VVMEXEC flag is set). In this case we
9327 * need to prevent further use of existing replicas.
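 * Invalidation is just clearing tr_valid on every svntr entry for this
 * vnode; segvn_textrepl() then treats those entries as stale and will not
 * bind new segments to them.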
9328 */ 9329 static void 9330 segvn_inval_trcache(vnode_t *vp) 9331 { 9332 ulong_t hash = SVNTR_HASH_FUNC(vp); 9333 svntr_t *svntrp; 9334 9335 ASSERT(vp->v_flag & VVMEXEC); 9336 9337 if (svntr_hashtab == NULL) { 9338 return; 9339 } 9340 9341 mutex_enter(&svntr_hashtab[hash].tr_lock); 9342 svntrp = svntr_hashtab[hash].tr_head; 9343 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9344 ASSERT(svntrp->tr_refcnt != 0); 9345 if (svntrp->tr_vp == vp && svntrp->tr_valid) { 9346 svntrp->tr_valid = 0; 9347 } 9348 } 9349 mutex_exit(&svntr_hashtab[hash].tr_lock); 9350 } 9351 9352 static void 9353 segvn_trasync_thread(void) 9354 { 9355 callb_cpr_t cpr_info; 9356 kmutex_t cpr_lock; /* just for CPR stuff */ 9357 9358 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL); 9359 9360 CALLB_CPR_INIT(&cpr_info, &cpr_lock, 9361 callb_generic_cpr, "segvn_async"); 9362 9363 if (segvn_update_textrepl_interval == 0) { 9364 segvn_update_textrepl_interval = segvn_update_tr_time * hz; 9365 } else { 9366 segvn_update_textrepl_interval *= hz; 9367 } 9368 (void) timeout(segvn_trupdate_wakeup, NULL, 9369 segvn_update_textrepl_interval); 9370 9371 for (;;) { 9372 mutex_enter(&cpr_lock); 9373 CALLB_CPR_SAFE_BEGIN(&cpr_info); 9374 mutex_exit(&cpr_lock); 9375 sema_p(&segvn_trasync_sem); 9376 mutex_enter(&cpr_lock); 9377 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 9378 mutex_exit(&cpr_lock); 9379 segvn_trupdate(); 9380 } 9381 } 9382 9383 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0; 9384 9385 static void 9386 segvn_trupdate_wakeup(void *dummy) 9387 { 9388 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations(); 9389 9390 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) { 9391 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs; 9392 sema_v(&segvn_trasync_sem); 9393 } 9394 9395 if (!segvn_disable_textrepl_update && 9396 segvn_update_textrepl_interval != 0) { 9397 (void) timeout(segvn_trupdate_wakeup, dummy, 9398 segvn_update_textrepl_interval); 9399 } 9400 } 9401 9402 static void 9403 segvn_trupdate(void) 9404 { 9405 ulong_t hash; 9406 svntr_t *svntrp; 9407 segvn_data_t *svd; 9408 9409 ASSERT(svntr_hashtab != NULL); 9410 9411 for (hash = 0; hash < svntr_hashtab_sz; hash++) { 9412 mutex_enter(&svntr_hashtab[hash].tr_lock); 9413 svntrp = svntr_hashtab[hash].tr_head; 9414 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9415 ASSERT(svntrp->tr_refcnt != 0); 9416 svd = svntrp->tr_svnhead; 9417 for (; svd != NULL; svd = svd->svn_trnext) { 9418 segvn_trupdate_seg(svd->seg, svd, svntrp, 9419 hash); 9420 } 9421 } 9422 mutex_exit(&svntr_hashtab[hash].tr_lock); 9423 } 9424 } 9425 9426 static void 9427 segvn_trupdate_seg(struct seg *seg, 9428 segvn_data_t *svd, 9429 svntr_t *svntrp, 9430 ulong_t hash) 9431 { 9432 proc_t *p; 9433 lgrp_id_t lgrp_id; 9434 struct as *as; 9435 size_t size; 9436 struct anon_map *amp; 9437 9438 ASSERT(svd->vp != NULL); 9439 ASSERT(svd->vp == svntrp->tr_vp); 9440 ASSERT(svd->offset == svntrp->tr_off); 9441 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff); 9442 ASSERT(seg != NULL); 9443 ASSERT(svd->seg == seg); 9444 ASSERT(seg->s_data == (void *)svd); 9445 ASSERT(seg->s_szc == svntrp->tr_szc); 9446 ASSERT(svd->tr_state == SEGVN_TR_ON); 9447 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9448 ASSERT(svd->amp != NULL); 9449 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 9450 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE); 9451 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX); 9452 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp); 9453 
ASSERT(svntrp->tr_refcnt != 0);
9454 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
9455
9456 as = seg->s_as;
9457 ASSERT(as != NULL && as != &kas);
9458 p = as->a_proc;
9459 ASSERT(p != NULL);
9460 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9461 lgrp_id = p->p_t1_lgrpid;
9462 if (lgrp_id == LGRP_NONE) {
9463 return;
9464 }
9465 ASSERT(lgrp_id < NLGRPS_MAX);
9466 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
9467 return;
9468 }
9469
9470 /*
9471 * Use tryenter locking since we are locking as/seg and svntr hash
9472 * lock in reverse from synchronous thread order.
9473 */
9474 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
9475 SEGVN_TR_ADDSTAT(nolock);
9476 if (segvn_lgrp_trthr_migrs_snpsht) {
9477 segvn_lgrp_trthr_migrs_snpsht = 0;
9478 }
9479 return;
9480 }
9481 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
9482 AS_LOCK_EXIT(as, &as->a_lock);
9483 SEGVN_TR_ADDSTAT(nolock);
9484 if (segvn_lgrp_trthr_migrs_snpsht) {
9485 segvn_lgrp_trthr_migrs_snpsht = 0;
9486 }
9487 return;
9488 }
9489 size = seg->s_size;
9490 if (svntrp->tr_amp[lgrp_id] == NULL) {
9491 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9492 if (trmem > segvn_textrepl_max_bytes) {
9493 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9494 AS_LOCK_EXIT(as, &as->a_lock);
9495 atomic_add_long(&segvn_textrepl_bytes, -size);
9496 SEGVN_TR_ADDSTAT(normem);
9497 return;
9498 }
9499 if (anon_try_resv_zone(size, NULL) == 0) {
9500 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9501 AS_LOCK_EXIT(as, &as->a_lock);
9502 atomic_add_long(&segvn_textrepl_bytes, -size);
9503 SEGVN_TR_ADDSTAT(noanon);
9504 return;
9505 }
9506 amp = anonmap_alloc(size, size, KM_NOSLEEP);
9507 if (amp == NULL) {
9508 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9509 AS_LOCK_EXIT(as, &as->a_lock);
9510 atomic_add_long(&segvn_textrepl_bytes, -size);
9511 anon_unresv_zone(size, NULL);
9512 SEGVN_TR_ADDSTAT(nokmem);
9513 return;
9514 }
9515 ASSERT(amp->refcnt == 1);
9516 amp->a_szc = seg->s_szc;
9517 svntrp->tr_amp[lgrp_id] = amp;
9518 }
9519 /*
9520 * We don't need to drop the bucket lock but here we give other
9521 * threads a chance. svntr and svd can't be unlinked as long as
9522 * segment lock is held as a writer and AS held as well. After we
9523 * retake bucket lock we'll continue from where we left. We'll be able
9524 * to reach the end of either list since new entries are always added
9525 * to the beginning of the lists.
9526 */
9527 mutex_exit(&svntr_hashtab[hash].tr_lock);
9528 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
9529 mutex_enter(&svntr_hashtab[hash].tr_lock);
9530
9531 ASSERT(svd->tr_state == SEGVN_TR_ON);
9532 ASSERT(svd->amp != NULL);
9533 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
9534 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
9535 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
9536
9537 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9538 svd->amp = svntrp->tr_amp[lgrp_id];
9539 p->p_tr_lgrpid = NLGRPS_MAX;
9540 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9541 AS_LOCK_EXIT(as, &as->a_lock);
9542
9543 ASSERT(svntrp->tr_refcnt != 0);
9544 ASSERT(svd->vp == svntrp->tr_vp);
9545 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
9546 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
9547 ASSERT(svd->seg == seg);
9548 ASSERT(svd->tr_state == SEGVN_TR_ON);
9549
9550 SEGVN_TR_ADDSTAT(asyncrepl);
9551 }
9552