1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - shared or copy-on-write from a vnode/anonymous memory. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/param.h> 47 #include <sys/t_lock.h> 48 #include <sys/errno.h> 49 #include <sys/systm.h> 50 #include <sys/mman.h> 51 #include <sys/debug.h> 52 #include <sys/cred.h> 53 #include <sys/vmsystm.h> 54 #include <sys/tuneable.h> 55 #include <sys/bitmap.h> 56 #include <sys/swap.h> 57 #include <sys/kmem.h> 58 #include <sys/sysmacros.h> 59 #include <sys/vtrace.h> 60 #include <sys/cmn_err.h> 61 #include <sys/callb.h> 62 #include <sys/vm.h> 63 #include <sys/dumphdr.h> 64 #include <sys/lgrp.h> 65 66 #include <vm/hat.h> 67 #include <vm/as.h> 68 #include <vm/seg.h> 69 #include <vm/seg_vn.h> 70 #include <vm/pvn.h> 71 #include <vm/anon.h> 72 #include <vm/page.h> 73 #include <vm/vpage.h> 74 #include <sys/proc.h> 75 #include <sys/task.h> 76 #include <sys/project.h> 77 #include <sys/zone.h> 78 #include <sys/shm_impl.h> 79 /* 80 * Private seg op routines. 
81 */ 82 static int segvn_dup(struct seg *seg, struct seg *newseg); 83 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 84 static void segvn_free(struct seg *seg); 85 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 86 caddr_t addr, size_t len, enum fault_type type, 87 enum seg_rw rw); 88 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 89 static int segvn_setprot(struct seg *seg, caddr_t addr, 90 size_t len, uint_t prot); 91 static int segvn_checkprot(struct seg *seg, caddr_t addr, 92 size_t len, uint_t prot); 93 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 94 static size_t segvn_swapout(struct seg *seg); 95 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 96 int attr, uint_t flags); 97 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 98 char *vec); 99 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 100 int attr, int op, ulong_t *lockmap, size_t pos); 101 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 102 uint_t *protv); 103 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 104 static int segvn_gettype(struct seg *seg, caddr_t addr); 105 static int segvn_getvp(struct seg *seg, caddr_t addr, 106 struct vnode **vpp); 107 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 108 uint_t behav); 109 static void segvn_dump(struct seg *seg); 110 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 111 struct page ***ppp, enum lock_type type, enum seg_rw rw); 112 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 113 uint_t szc); 114 static int segvn_getmemid(struct seg *seg, caddr_t addr, 115 memid_t *memidp); 116 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 117 static int segvn_capable(struct seg *seg, segcapability_t capable); 118 119 struct seg_ops segvn_ops = { 120 segvn_dup, 121 segvn_unmap, 122 segvn_free, 123 segvn_fault, 124 segvn_faulta, 125 segvn_setprot, 126 segvn_checkprot, 127 segvn_kluster, 128 segvn_swapout, 129 segvn_sync, 130 segvn_incore, 131 segvn_lockop, 132 segvn_getprot, 133 segvn_getoffset, 134 segvn_gettype, 135 segvn_getvp, 136 segvn_advise, 137 segvn_dump, 138 segvn_pagelock, 139 segvn_setpagesize, 140 segvn_getmemid, 141 segvn_getpolicy, 142 segvn_capable, 143 }; 144 145 /* 146 * Common zfod structures, provided as a shorthand for others to use. 
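 *
 * As an illustration (this caller is not part of this file), kernel code that
 * wants an ordinary zero-fill-on-demand user mapping typically hands one of
 * these canned argument blocks straight to as_map() instead of building its
 * own segvn_crargs_t; error handling and address selection are elided here:
 *
 *	as_rangelock(as);
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 *	as_rangeunlock(as);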
147 */ 148 static segvn_crargs_t zfod_segvn_crargs = 149 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 150 static segvn_crargs_t kzfod_segvn_crargs = 151 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 152 PROT_ALL & ~PROT_USER); 153 static segvn_crargs_t stack_noexec_crargs = 154 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 155 156 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 157 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 158 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 159 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 160 161 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 162 163 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 164 165 static int segvn_concat(struct seg *, struct seg *, int); 166 static int segvn_extend_prev(struct seg *, struct seg *, 167 struct segvn_crargs *, size_t); 168 static int segvn_extend_next(struct seg *, struct seg *, 169 struct segvn_crargs *, size_t); 170 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 171 static void segvn_pagelist_rele(page_t **); 172 static void segvn_setvnode_mpss(vnode_t *); 173 static void segvn_relocate_pages(page_t **, page_t *); 174 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 175 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 176 uint_t, page_t **, page_t **, uint_t *, int *); 177 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 178 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 179 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 180 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 181 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 182 u_offset_t, struct vpage *, page_t **, uint_t, 183 enum fault_type, enum seg_rw, int, int); 184 static void segvn_vpage(struct seg *); 185 186 static void segvn_purge(struct seg *seg); 187 static int segvn_reclaim(struct seg *, caddr_t, size_t, struct page **, 188 enum seg_rw); 189 190 static int sameprot(struct seg *, caddr_t, size_t); 191 192 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 193 static int segvn_clrszc(struct seg *); 194 static struct seg *segvn_split_seg(struct seg *, caddr_t); 195 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 196 ulong_t, uint_t); 197 198 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 199 size_t, void *, u_offset_t); 200 201 static int segvn_pp_lock_anonpages(page_t *, int); 202 static void segvn_pp_unlock_anonpages(page_t *, int); 203 204 static struct kmem_cache *segvn_cache; 205 206 #ifdef VM_STATS 207 static struct segvnvmstats_str { 208 ulong_t fill_vp_pages[31]; 209 ulong_t fltvnpages[49]; 210 ulong_t fullszcpages[10]; 211 ulong_t relocatepages[3]; 212 ulong_t fltanpages[17]; 213 ulong_t pagelock[3]; 214 ulong_t demoterange[3]; 215 } segvnvmstats; 216 #endif /* VM_STATS */ 217 218 #define SDR_RANGE 1 /* demote entire range */ 219 #define SDR_END 2 /* demote non aligned ends only */ 220 221 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 222 if ((len) != 0) { \ 223 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 224 ASSERT(lpgaddr >= (seg)->s_base); \ 225 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 226 (len)), pgsz); \ 227 ASSERT(lpgeaddr > lpgaddr); \ 228 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 
229 } else { \ 230 lpgeaddr = lpgaddr = (addr); \ 231 } \ 232 } 233 234 /*ARGSUSED*/ 235 static int 236 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 237 { 238 struct segvn_data *svd = buf; 239 240 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 241 mutex_init(&svd->segp_slock, NULL, MUTEX_DEFAULT, NULL); 242 svd->svn_trnext = svd->svn_trprev = NULL; 243 return (0); 244 } 245 246 /*ARGSUSED1*/ 247 static void 248 segvn_cache_destructor(void *buf, void *cdrarg) 249 { 250 struct segvn_data *svd = buf; 251 252 rw_destroy(&svd->lock); 253 mutex_destroy(&svd->segp_slock); 254 } 255 256 /*ARGSUSED*/ 257 static int 258 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 259 { 260 bzero(buf, sizeof (svntr_t)); 261 return (0); 262 } 263 264 /* 265 * Patching this variable to non-zero allows the system to run with 266 * stacks marked as "not executable". It's a bit of a kludge, but is 267 * provided as a tweakable for platforms that export those ABIs 268 * (e.g. sparc V8) that have executable stacks enabled by default. 269 * There are also some restrictions for platforms that don't actually 270 * implement 'noexec' protections. 271 * 272 * Once enabled, the system is (therefore) unable to provide a fully 273 * ABI-compliant execution environment, though practically speaking, 274 * most everything works. The exceptions are generally some interpreters 275 * and debuggers that create executable code on the stack and jump 276 * into it (without explicitly mprotecting the address range to include 277 * PROT_EXEC). 278 * 279 * One important class of applications that are disabled are those 280 * that have been transformed into malicious agents using one of the 281 * numerous "buffer overflow" attacks. See 4007890. 282 */ 283 int noexec_user_stack = 0; 284 int noexec_user_stack_log = 1; 285 286 int segvn_lpg_disable = 0; 287 uint_t segvn_maxpgszc = 0; 288 289 ulong_t segvn_vmpss_clrszc_cnt; 290 ulong_t segvn_vmpss_clrszc_err; 291 ulong_t segvn_fltvnpages_clrszc_cnt; 292 ulong_t segvn_fltvnpages_clrszc_err; 293 ulong_t segvn_setpgsz_align_err; 294 ulong_t segvn_setpgsz_anon_align_err; 295 ulong_t segvn_setpgsz_getattr_err; 296 ulong_t segvn_setpgsz_eof_err; 297 ulong_t segvn_faultvnmpss_align_err1; 298 ulong_t segvn_faultvnmpss_align_err2; 299 ulong_t segvn_faultvnmpss_align_err3; 300 ulong_t segvn_faultvnmpss_align_err4; 301 ulong_t segvn_faultvnmpss_align_err5; 302 ulong_t segvn_vmpss_pageio_deadlk_err; 303 304 int segvn_use_regions = 1; 305 306 /* 307 * Segvn supports text replication optimization for NUMA platforms. Text 308 * replica's are represented by anon maps (amp). There's one amp per text file 309 * region per lgroup. A process chooses the amp for each of its text mappings 310 * based on the lgroup assignment of its main thread (t_tid = 1). All 311 * processes that want a replica on a particular lgroup for the same text file 312 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 313 * with vp,off,size,szc used as a key. Text replication segments are read only 314 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 315 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 316 * pages. Replication amp is assigned to a segment when it gets its first 317 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread 318 * rechecks periodically if the process still maps an amp local to the main 319 * thread. 
 * If not, the async thread forces the process to remap to an amp in the new
 * home lgroup of the main thread. The current text replication implementation
 * only benefits workloads that do most of their work in the main thread of a
 * process, or whose threads all run in the same lgroup. To extend the text
 * replication benefit to other types of multithreaded workloads, further work
 * would be needed in the hat layer to allow the same virtual address in the
 * same hat to simultaneously map different physical addresses (i.e. page
 * table replication would be needed for x86).
 *
 * amp pages are used instead of vnode pages as long as the segment has a very
 * simple life cycle: it's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
 * happens, such as a protection change, a real COW fault, a pagesize change,
 * an MC_LOCK request or a partial unmap, we turn off text replication by
 * converting the segment back to a vnode-only segment (unmap the segment's
 * address range and set svd->amp to NULL).
 *
 * The original file can be changed after an amp is inserted into
 * svntr_hashtab. Processes that are launched after the file has already
 * changed can't use the replicas created prior to the file change. To
 * implement this, hash entries are timestamped: a replica can only be used if
 * the current file modification time is the same as the timestamp saved when
 * the hash entry was created. However, timestamps alone are not sufficient to
 * detect file modification via mmap(MAP_SHARED) mappings, so we deal with
 * file changes via MAP_SHARED mappings differently. When writable MAP_SHARED
 * mappings are created to vnodes marked as executable, we mark all existing
 * replicas for this vnode as not usable for future text mappings. And we
 * don't create new replicas for files that currently have potentially
 * writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is true).
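 *
 * A rough sketch (not code from this file) of the staleness check described
 * above; "saved_mtime" stands for the timestamp stored in the hash entry and
 * is a hypothetical name used only for this illustration:
 *
 *	vattr_t va;
 *
 *	va.va_mask = AT_MTIME;
 *	if (VOP_GETATTR(vp, &va, 0, CRED()) != 0 ||
 *	    va.va_mtime.tv_sec != saved_mtime.tv_sec ||
 *	    va.va_mtime.tv_nsec != saved_mtime.tv_nsec) {
 *		replica is stale or unknown; fall back to vnode pages
 *	}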
350 */ 351 352 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 353 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 354 355 static ulong_t svntr_hashtab_sz = 512; 356 static svntr_bucket_t *svntr_hashtab = NULL; 357 static struct kmem_cache *svntr_cache; 358 static svntr_stats_t *segvn_textrepl_stats; 359 static ksema_t segvn_trasync_sem; 360 361 int segvn_disable_textrepl = 0; 362 size_t textrepl_size_thresh = (size_t)-1; 363 size_t segvn_textrepl_bytes = 0; 364 size_t segvn_textrepl_max_bytes = 0; 365 clock_t segvn_update_textrepl_interval = 0; 366 int segvn_update_tr_time = 10; 367 int segvn_disable_textrepl_update = 0; 368 369 static void segvn_textrepl(struct seg *); 370 static void segvn_textunrepl(struct seg *, int); 371 static void segvn_inval_trcache(vnode_t *); 372 static void segvn_trasync_thread(void); 373 static void segvn_trupdate_wakeup(void *); 374 static void segvn_trupdate(void); 375 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 376 ulong_t); 377 378 /* 379 * Initialize segvn data structures 380 */ 381 void 382 segvn_init(void) 383 { 384 uint_t maxszc; 385 uint_t szc; 386 size_t pgsz; 387 388 segvn_cache = kmem_cache_create("segvn_cache", 389 sizeof (struct segvn_data), 0, 390 segvn_cache_constructor, segvn_cache_destructor, NULL, 391 NULL, NULL, 0); 392 393 if (segvn_lpg_disable != 0) 394 return; 395 szc = maxszc = page_num_pagesizes() - 1; 396 if (szc == 0) { 397 segvn_lpg_disable = 1; 398 return; 399 } 400 if (page_get_pagesize(0) != PAGESIZE) { 401 panic("segvn_init: bad szc 0"); 402 /*NOTREACHED*/ 403 } 404 while (szc != 0) { 405 pgsz = page_get_pagesize(szc); 406 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 407 panic("segvn_init: bad szc %d", szc); 408 /*NOTREACHED*/ 409 } 410 szc--; 411 } 412 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 413 segvn_maxpgszc = maxszc; 414 415 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 416 segvn_use_regions = 0; 417 418 /* 419 * For now shared regions and text replication segvn support 420 * are mutually exclusive. This is acceptable because 421 * currently significant benefit from text replication was 422 * only observed on AMD64 NUMA platforms (due to relatively 423 * small L2$ size) and currently we don't support shared 424 * regions on x86. 
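 *
 * For illustration only: the patchable variables above are normally set from
 * /etc/system; the values shown are examples, not recommendations:
 *
 *	set noexec_user_stack = 1	(mark user stacks non-executable)
 *	set segvn_lpg_disable = 1	(disable large page mappings in segvn)
 *	set segvn_disable_textrepl = 1	(disable text replication)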
425 */ 426 if (segvn_use_regions && !segvn_disable_textrepl) { 427 segvn_disable_textrepl = 1; 428 } 429 430 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 431 !segvn_disable_textrepl) { 432 ulong_t i; 433 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 434 435 svntr_cache = kmem_cache_create("svntr_cache", 436 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 437 NULL, NULL, NULL, 0); 438 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 439 for (i = 0; i < svntr_hashtab_sz; i++) { 440 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 441 MUTEX_DEFAULT, NULL); 442 } 443 segvn_textrepl_max_bytes = ptob(physmem) / 444 segvn_textrepl_max_bytes_factor; 445 segvn_textrepl_stats = kmem_zalloc(NCPU * 446 sizeof (svntr_stats_t), KM_SLEEP); 447 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 448 (void) thread_create(NULL, 0, segvn_trasync_thread, 449 NULL, 0, &p0, TS_RUN, minclsyspri); 450 } 451 } 452 453 #define SEGVN_PAGEIO ((void *)0x1) 454 #define SEGVN_NOPAGEIO ((void *)0x2) 455 456 static void 457 segvn_setvnode_mpss(vnode_t *vp) 458 { 459 int err; 460 461 ASSERT(vp->v_mpssdata == NULL || 462 vp->v_mpssdata == SEGVN_PAGEIO || 463 vp->v_mpssdata == SEGVN_NOPAGEIO); 464 465 if (vp->v_mpssdata == NULL) { 466 if (vn_vmpss_usepageio(vp)) { 467 err = VOP_PAGEIO(vp, (page_t *)NULL, 468 (u_offset_t)0, 0, 0, CRED()); 469 } else { 470 err = ENOSYS; 471 } 472 /* 473 * set v_mpssdata just once per vnode life 474 * so that it never changes. 475 */ 476 mutex_enter(&vp->v_lock); 477 if (vp->v_mpssdata == NULL) { 478 if (err == EINVAL) { 479 vp->v_mpssdata = SEGVN_PAGEIO; 480 } else { 481 vp->v_mpssdata = SEGVN_NOPAGEIO; 482 } 483 } 484 mutex_exit(&vp->v_lock); 485 } 486 } 487 488 int 489 segvn_create(struct seg *seg, void *argsp) 490 { 491 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 492 struct segvn_data *svd; 493 size_t swresv = 0; 494 struct cred *cred; 495 struct anon_map *amp; 496 int error = 0; 497 size_t pgsz; 498 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 499 int use_rgn = 0; 500 int trok = 0; 501 502 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 503 504 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 505 panic("segvn_create type"); 506 /*NOTREACHED*/ 507 } 508 509 /* 510 * Check arguments. If a shared anon structure is given then 511 * it is illegal to also specify a vp. 512 */ 513 if (a->amp != NULL && a->vp != NULL) { 514 panic("segvn_create anon_map"); 515 /*NOTREACHED*/ 516 } 517 518 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 519 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 520 segvn_use_regions) { 521 use_rgn = 1; 522 } 523 524 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */ 525 if (a->type == MAP_SHARED) 526 a->flags &= ~MAP_NORESERVE; 527 528 if (a->szc != 0) { 529 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 530 (a->amp != NULL && a->type == MAP_PRIVATE) || 531 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 532 a->szc = 0; 533 } else { 534 if (a->szc > segvn_maxpgszc) 535 a->szc = segvn_maxpgszc; 536 pgsz = page_get_pagesize(a->szc); 537 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 538 !IS_P2ALIGNED(seg->s_size, pgsz)) { 539 a->szc = 0; 540 } else if (a->vp != NULL) { 541 extern struct vnode kvp; 542 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 543 /* 544 * paranoid check. 545 * hat_page_demote() is not supported 546 * on swapfs pages. 
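 *
 * As an aside, the IS_P2ALIGNED() checks above are plain power-of-two
 * arithmetic. For example, with a hypothetical 4M large page
 * (pgsz == 0x400000):
 *
 *	IS_P2ALIGNED(0x80400000, 0x400000)	is true  (a->szc is kept)
 *	IS_P2ALIGNED(0x80410000, 0x400000)	is false (a->szc is reset to 0)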
547 */ 548 a->szc = 0; 549 } else if (map_addr_vacalign_check(seg->s_base, 550 a->offset & PAGEMASK)) { 551 a->szc = 0; 552 } 553 } else if (a->amp != NULL) { 554 pgcnt_t anum = btopr(a->offset); 555 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 556 if (!IS_P2ALIGNED(anum, pgcnt)) { 557 a->szc = 0; 558 } 559 } 560 } 561 } 562 563 /* 564 * If segment may need private pages, reserve them now. 565 */ 566 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 567 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 568 if (anon_resv(seg->s_size) == 0) 569 return (EAGAIN); 570 swresv = seg->s_size; 571 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 572 seg, swresv, 1); 573 } 574 575 /* 576 * Reserve any mapping structures that may be required. 577 * 578 * Don't do it for segments that may use regions. It's currently a 579 * noop in the hat implementations anyway. 580 */ 581 if (!use_rgn) { 582 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 583 } 584 585 if (a->cred) { 586 cred = a->cred; 587 crhold(cred); 588 } else { 589 crhold(cred = CRED()); 590 } 591 592 /* Inform the vnode of the new mapping */ 593 if (a->vp != NULL) { 594 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 595 seg->s_as, seg->s_base, seg->s_size, a->prot, 596 a->maxprot, a->type, cred); 597 if (error) { 598 if (swresv != 0) { 599 anon_unresv(swresv); 600 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 601 "anon proc:%p %lu %u", 602 seg, swresv, 0); 603 } 604 crfree(cred); 605 if (!use_rgn) { 606 hat_unload(seg->s_as->a_hat, seg->s_base, 607 seg->s_size, HAT_UNLOAD_UNMAP); 608 } 609 return (error); 610 } 611 /* 612 * svntr_hashtab will be NULL if we support shared regions. 613 */ 614 trok = ((a->flags & MAP_TEXT) && 615 (seg->s_size > textrepl_size_thresh || 616 (a->flags & _MAP_TEXTREPL)) && 617 lgrp_optimizations() && svntr_hashtab != NULL && 618 a->type == MAP_PRIVATE && swresv == 0 && 619 !(a->flags & MAP_NORESERVE) && 620 seg->s_as != &kas && a->vp->v_type == VREG); 621 622 ASSERT(!trok || !use_rgn); 623 } 624 625 /* 626 * If more than one segment in the address space, and they're adjacent 627 * virtually, try to concatenate them. Don't concatenate if an 628 * explicit anon_map structure was supplied (e.g., SystemV shared 629 * memory) or if we'll use text replication for this segment. 630 */ 631 if (a->amp == NULL && !use_rgn && !trok) { 632 struct seg *pseg, *nseg; 633 struct segvn_data *psvd, *nsvd; 634 lgrp_mem_policy_t ppolicy, npolicy; 635 uint_t lgrp_mem_policy_flags = 0; 636 extern lgrp_mem_policy_t lgrp_mem_default_policy; 637 638 /* 639 * Memory policy flags (lgrp_mem_policy_flags) is valid when 640 * extending stack/heap segments. 641 */ 642 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 643 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 644 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 645 } else { 646 /* 647 * Get policy when not extending it from another segment 648 */ 649 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 650 } 651 652 /* 653 * First, try to concatenate the previous and new segments 654 */ 655 pseg = AS_SEGPREV(seg->s_as, seg); 656 if (pseg != NULL && 657 pseg->s_base + pseg->s_size == seg->s_base && 658 pseg->s_ops == &segvn_ops) { 659 /* 660 * Get memory allocation policy from previous segment. 661 * When extension is specified (e.g. for heap) apply 662 * this policy to the new segment regardless of the 663 * outcome of segment concatenation. 
Extension occurs 664 * for non-default policy otherwise default policy is 665 * used and is based on extended segment size. 666 */ 667 psvd = (struct segvn_data *)pseg->s_data; 668 ppolicy = psvd->policy_info.mem_policy; 669 if (lgrp_mem_policy_flags == 670 LGRP_MP_FLAG_EXTEND_UP) { 671 if (ppolicy != lgrp_mem_default_policy) { 672 mpolicy = ppolicy; 673 } else { 674 mpolicy = lgrp_mem_policy_default( 675 pseg->s_size + seg->s_size, 676 a->type); 677 } 678 } 679 680 if (mpolicy == ppolicy && 681 (pseg->s_size + seg->s_size <= 682 segvn_comb_thrshld || psvd->amp == NULL) && 683 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 684 /* 685 * success! now try to concatenate 686 * with following seg 687 */ 688 crfree(cred); 689 nseg = AS_SEGNEXT(pseg->s_as, pseg); 690 if (nseg != NULL && 691 nseg != pseg && 692 nseg->s_ops == &segvn_ops && 693 pseg->s_base + pseg->s_size == 694 nseg->s_base) 695 (void) segvn_concat(pseg, nseg, 0); 696 ASSERT(pseg->s_szc == 0 || 697 (a->szc == pseg->s_szc && 698 IS_P2ALIGNED(pseg->s_base, pgsz) && 699 IS_P2ALIGNED(pseg->s_size, pgsz))); 700 return (0); 701 } 702 } 703 704 /* 705 * Failed, so try to concatenate with following seg 706 */ 707 nseg = AS_SEGNEXT(seg->s_as, seg); 708 if (nseg != NULL && 709 seg->s_base + seg->s_size == nseg->s_base && 710 nseg->s_ops == &segvn_ops) { 711 /* 712 * Get memory allocation policy from next segment. 713 * When extension is specified (e.g. for stack) apply 714 * this policy to the new segment regardless of the 715 * outcome of segment concatenation. Extension occurs 716 * for non-default policy otherwise default policy is 717 * used and is based on extended segment size. 718 */ 719 nsvd = (struct segvn_data *)nseg->s_data; 720 npolicy = nsvd->policy_info.mem_policy; 721 if (lgrp_mem_policy_flags == 722 LGRP_MP_FLAG_EXTEND_DOWN) { 723 if (npolicy != lgrp_mem_default_policy) { 724 mpolicy = npolicy; 725 } else { 726 mpolicy = lgrp_mem_policy_default( 727 nseg->s_size + seg->s_size, 728 a->type); 729 } 730 } 731 732 if (mpolicy == npolicy && 733 segvn_extend_next(seg, nseg, a, swresv) == 0) { 734 crfree(cred); 735 ASSERT(nseg->s_szc == 0 || 736 (a->szc == nseg->s_szc && 737 IS_P2ALIGNED(nseg->s_base, pgsz) && 738 IS_P2ALIGNED(nseg->s_size, pgsz))); 739 return (0); 740 } 741 } 742 } 743 744 if (a->vp != NULL) { 745 VN_HOLD(a->vp); 746 if (a->type == MAP_SHARED) 747 lgrp_shm_policy_init(NULL, a->vp); 748 } 749 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 750 751 seg->s_ops = &segvn_ops; 752 seg->s_data = (void *)svd; 753 seg->s_szc = a->szc; 754 755 svd->seg = seg; 756 svd->vp = a->vp; 757 /* 758 * Anonymous mappings have no backing file so the offset is meaningless. 759 */ 760 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0; 761 svd->prot = a->prot; 762 svd->maxprot = a->maxprot; 763 svd->pageprot = 0; 764 svd->type = a->type; 765 svd->vpage = NULL; 766 svd->cred = cred; 767 svd->advice = MADV_NORMAL; 768 svd->pageadvice = 0; 769 svd->flags = (ushort_t)a->flags; 770 svd->softlockcnt = 0; 771 svd->rcookie = HAT_INVALID_REGION_COOKIE; 772 773 if (a->szc != 0 && a->vp != NULL) { 774 segvn_setvnode_mpss(a->vp); 775 } 776 if (svd->type == MAP_SHARED && svd->vp != NULL && 777 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 778 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 779 segvn_inval_trcache(svd->vp); 780 } 781 782 amp = a->amp; 783 if ((svd->amp = amp) == NULL) { 784 svd->anon_index = 0; 785 if (svd->type == MAP_SHARED) { 786 svd->swresv = 0; 787 /* 788 * Shared mappings to a vp need no other setup. 
789 * If we have a shared mapping to an anon_map object 790 * which hasn't been allocated yet, allocate the 791 * struct now so that it will be properly shared 792 * by remembering the swap reservation there. 793 */ 794 if (a->vp == NULL) { 795 svd->amp = anonmap_alloc(seg->s_size, swresv, 796 ANON_SLEEP); 797 svd->amp->a_szc = seg->s_szc; 798 } 799 } else { 800 /* 801 * Private mapping (with or without a vp). 802 * Allocate anon_map when needed. 803 */ 804 svd->swresv = swresv; 805 } 806 } else { 807 pgcnt_t anon_num; 808 809 /* 810 * Mapping to an existing anon_map structure without a vp. 811 * For now we will insure that the segment size isn't larger 812 * than the size - offset gives us. Later on we may wish to 813 * have the anon array dynamically allocated itself so that 814 * we don't always have to allocate all the anon pointer slots. 815 * This of course involves adding extra code to check that we 816 * aren't trying to use an anon pointer slot beyond the end 817 * of the currently allocated anon array. 818 */ 819 if ((amp->size - a->offset) < seg->s_size) { 820 panic("segvn_create anon_map size"); 821 /*NOTREACHED*/ 822 } 823 824 anon_num = btopr(a->offset); 825 826 if (a->type == MAP_SHARED) { 827 /* 828 * SHARED mapping to a given anon_map. 829 */ 830 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 831 amp->refcnt++; 832 if (a->szc > amp->a_szc) { 833 amp->a_szc = a->szc; 834 } 835 ANON_LOCK_EXIT(&->a_rwlock); 836 svd->anon_index = anon_num; 837 svd->swresv = 0; 838 } else { 839 /* 840 * PRIVATE mapping to a given anon_map. 841 * Make sure that all the needed anon 842 * structures are created (so that we will 843 * share the underlying pages if nothing 844 * is written by this mapping) and then 845 * duplicate the anon array as is done 846 * when a privately mapped segment is dup'ed. 847 */ 848 struct anon *ap; 849 caddr_t addr; 850 caddr_t eaddr; 851 ulong_t anon_idx; 852 int hat_flag = HAT_LOAD; 853 854 if (svd->flags & MAP_TEXT) { 855 hat_flag |= HAT_LOAD_TEXT; 856 } 857 858 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 859 svd->amp->a_szc = seg->s_szc; 860 svd->anon_index = 0; 861 svd->swresv = swresv; 862 863 /* 864 * Prevent 2 threads from allocating anon 865 * slots simultaneously. 866 */ 867 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 868 eaddr = seg->s_base + seg->s_size; 869 870 for (anon_idx = anon_num, addr = seg->s_base; 871 addr < eaddr; addr += PAGESIZE, anon_idx++) { 872 page_t *pp; 873 874 if ((ap = anon_get_ptr(amp->ahp, 875 anon_idx)) != NULL) 876 continue; 877 878 /* 879 * Allocate the anon struct now. 880 * Might as well load up translation 881 * to the page while we're at it... 882 */ 883 pp = anon_zero(seg, addr, &ap, cred); 884 if (ap == NULL || pp == NULL) { 885 panic("segvn_create anon_zero"); 886 /*NOTREACHED*/ 887 } 888 889 /* 890 * Re-acquire the anon_map lock and 891 * initialize the anon array entry. 
892 */ 893 ASSERT(anon_get_ptr(amp->ahp, 894 anon_idx) == NULL); 895 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 896 ANON_SLEEP); 897 898 ASSERT(seg->s_szc == 0); 899 ASSERT(!IS_VMODSORT(pp->p_vnode)); 900 901 ASSERT(use_rgn == 0); 902 hat_memload(seg->s_as->a_hat, addr, pp, 903 svd->prot & ~PROT_WRITE, hat_flag); 904 905 page_unlock(pp); 906 } 907 ASSERT(seg->s_szc == 0); 908 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 909 0, seg->s_size); 910 ANON_LOCK_EXIT(&->a_rwlock); 911 } 912 } 913 914 /* 915 * Set default memory allocation policy for segment 916 * 917 * Always set policy for private memory at least for initialization 918 * even if this is a shared memory segment 919 */ 920 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 921 922 if (svd->type == MAP_SHARED) 923 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 924 svd->vp, svd->offset, seg->s_size); 925 926 if (use_rgn) { 927 ASSERT(!trok); 928 ASSERT(svd->amp == NULL); 929 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 930 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 931 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 932 HAT_REGION_TEXT); 933 } 934 935 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 936 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 937 938 return (0); 939 } 940 941 /* 942 * Concatenate two existing segments, if possible. 943 * Return 0 on success, -1 if two segments are not compatible 944 * or -2 on memory allocation failure. 945 * If amp_cat == 1 then try and concat segments with anon maps 946 */ 947 static int 948 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 949 { 950 struct segvn_data *svd1 = seg1->s_data; 951 struct segvn_data *svd2 = seg2->s_data; 952 struct anon_map *amp1 = svd1->amp; 953 struct anon_map *amp2 = svd2->amp; 954 struct vpage *vpage1 = svd1->vpage; 955 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 956 size_t size, nvpsize; 957 pgcnt_t npages1, npages2; 958 959 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 960 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 961 ASSERT(seg1->s_ops == seg2->s_ops); 962 963 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 964 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 965 return (-1); 966 } 967 968 /* both segments exist, try to merge them */ 969 #define incompat(x) (svd1->x != svd2->x) 970 if (incompat(vp) || incompat(maxprot) || 971 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 972 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 973 incompat(type) || incompat(cred) || incompat(flags) || 974 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 975 (svd2->softlockcnt > 0)) 976 return (-1); 977 #undef incompat 978 979 /* 980 * vp == NULL implies zfod, offset doesn't matter 981 */ 982 if (svd1->vp != NULL && 983 svd1->offset + seg1->s_size != svd2->offset) { 984 return (-1); 985 } 986 987 /* 988 * Don't concatenate if either segment uses text replication. 989 */ 990 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 991 return (-1); 992 } 993 994 /* 995 * Fail early if we're not supposed to concatenate 996 * segments with non NULL amp. 
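 *
 * For reference, a caller-side sketch (illustrative, not code from this file)
 * of the return convention documented above:
 *
 *	switch (segvn_concat(pseg, nseg, 0)) {
 *	case 0:		merged; nseg has already been freed via seg_free()
 *	case -1:	segments are incompatible; leave both segments alone
 *	case -2:	transient allocation failure; leave both segments alone
 *	}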
 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			for (vp = nvpage; vp < nvpage + npages1; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			for (vp = nvpage + npages1;
			    vp < nvpage + npages1 + npages2; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}
	}

	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If merging shared anon segments just decrement anon map's
	 * refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
1077 */ 1078 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1079 ASSERT(amp1->refcnt == 1); 1080 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1081 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1082 anon_release(nahp, btop(asize)); 1083 ANON_LOCK_EXIT(&1->a_rwlock); 1084 if (nvpage != NULL) { 1085 kmem_free(nvpage, nvpsize); 1086 } 1087 return (-2); 1088 } 1089 } 1090 if (amp2 != NULL) { 1091 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1092 ASSERT(amp2->refcnt == 1); 1093 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1094 nahp, btop(seg1->s_size), btop(seg2->s_size), 1095 ANON_NOSLEEP)) { 1096 anon_release(nahp, btop(asize)); 1097 ANON_LOCK_EXIT(&2->a_rwlock); 1098 if (amp1 != NULL) { 1099 ANON_LOCK_EXIT(&1->a_rwlock); 1100 } 1101 if (nvpage != NULL) { 1102 kmem_free(nvpage, nvpsize); 1103 } 1104 return (-2); 1105 } 1106 } 1107 if (amp1 != NULL) { 1108 namp = amp1; 1109 anon_release(amp1->ahp, btop(amp1->size)); 1110 } 1111 if (amp2 != NULL) { 1112 if (namp == NULL) { 1113 ASSERT(amp1 == NULL); 1114 namp = amp2; 1115 anon_release(amp2->ahp, btop(amp2->size)); 1116 } else { 1117 amp2->refcnt--; 1118 ANON_LOCK_EXIT(&2->a_rwlock); 1119 anonmap_free(amp2); 1120 } 1121 svd2->amp = NULL; /* needed for seg_free */ 1122 } 1123 namp->ahp = nahp; 1124 namp->size = asize; 1125 svd1->amp = namp; 1126 svd1->anon_index = 0; 1127 ANON_LOCK_EXIT(&namp->a_rwlock); 1128 } 1129 /* 1130 * Now free the old vpage structures. 1131 */ 1132 if (nvpage != NULL) { 1133 if (vpage1 != NULL) { 1134 kmem_free(vpage1, vpgtob(npages1)); 1135 } 1136 if (vpage2 != NULL) { 1137 svd2->vpage = NULL; 1138 kmem_free(vpage2, vpgtob(npages2)); 1139 } 1140 if (svd2->pageprot) { 1141 svd1->pageprot = 1; 1142 } 1143 if (svd2->pageadvice) { 1144 svd1->pageadvice = 1; 1145 } 1146 svd1->vpage = nvpage; 1147 } 1148 1149 /* all looks ok, merge segments */ 1150 svd1->swresv += svd2->swresv; 1151 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1152 size = seg2->s_size; 1153 seg_free(seg2); 1154 seg1->s_size += size; 1155 return (0); 1156 } 1157 1158 /* 1159 * Extend the previous segment (seg1) to include the 1160 * new segment (seg2 + a), if possible. 1161 * Return 0 on success. 1162 */ 1163 static int 1164 segvn_extend_prev(seg1, seg2, a, swresv) 1165 struct seg *seg1, *seg2; 1166 struct segvn_crargs *a; 1167 size_t swresv; 1168 { 1169 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1170 size_t size; 1171 struct anon_map *amp1; 1172 struct vpage *new_vpage; 1173 1174 /* 1175 * We don't need any segment level locks for "segvn" data 1176 * since the address space is "write" locked. 1177 */ 1178 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1179 1180 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1181 return (-1); 1182 } 1183 1184 /* second segment is new, try to extend first */ 1185 /* XXX - should also check cred */ 1186 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1187 (!svd1->pageprot && (svd1->prot != a->prot)) || 1188 svd1->type != a->type || svd1->flags != a->flags || 1189 seg1->s_szc != a->szc) 1190 return (-1); 1191 1192 /* vp == NULL implies zfod, offset doesn't matter */ 1193 if (svd1->vp != NULL && 1194 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1195 return (-1); 1196 1197 if (svd1->tr_state != SEGVN_TR_OFF) { 1198 return (-1); 1199 } 1200 1201 amp1 = svd1->amp; 1202 if (amp1) { 1203 pgcnt_t newpgs; 1204 1205 /* 1206 * Segment has private pages, can data structures 1207 * be expanded? 
1208 * 1209 * Acquire the anon_map lock to prevent it from changing, 1210 * if it is shared. This ensures that the anon_map 1211 * will not change while a thread which has a read/write 1212 * lock on an address space references it. 1213 * XXX - Don't need the anon_map lock at all if "refcnt" 1214 * is 1. 1215 * 1216 * Can't grow a MAP_SHARED segment with an anonmap because 1217 * there may be existing anon slots where we want to extend 1218 * the segment and we wouldn't know what to do with them 1219 * (e.g., for tmpfs right thing is to just leave them there, 1220 * for /dev/zero they should be cleared out). 1221 */ 1222 if (svd1->type == MAP_SHARED) 1223 return (-1); 1224 1225 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1226 if (amp1->refcnt > 1) { 1227 ANON_LOCK_EXIT(&1->a_rwlock); 1228 return (-1); 1229 } 1230 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1231 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1232 1233 if (newpgs == 0) { 1234 ANON_LOCK_EXIT(&1->a_rwlock); 1235 return (-1); 1236 } 1237 amp1->size = ptob(newpgs); 1238 ANON_LOCK_EXIT(&1->a_rwlock); 1239 } 1240 if (svd1->vpage != NULL) { 1241 struct vpage *vp, *evp; 1242 new_vpage = 1243 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1244 KM_NOSLEEP); 1245 if (new_vpage == NULL) 1246 return (-1); 1247 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1248 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1249 svd1->vpage = new_vpage; 1250 1251 vp = new_vpage + seg_pages(seg1); 1252 evp = vp + seg_pages(seg2); 1253 for (; vp < evp; vp++) 1254 VPP_SETPROT(vp, a->prot); 1255 } 1256 size = seg2->s_size; 1257 seg_free(seg2); 1258 seg1->s_size += size; 1259 svd1->swresv += swresv; 1260 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1261 svd1->type == MAP_SHARED && svd1->vp != NULL && 1262 (svd1->vp->v_flag & VVMEXEC)) { 1263 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1264 segvn_inval_trcache(svd1->vp); 1265 } 1266 return (0); 1267 } 1268 1269 /* 1270 * Extend the next segment (seg2) to include the 1271 * new segment (seg1 + a), if possible. 1272 * Return 0 on success. 1273 */ 1274 static int 1275 segvn_extend_next( 1276 struct seg *seg1, 1277 struct seg *seg2, 1278 struct segvn_crargs *a, 1279 size_t swresv) 1280 { 1281 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1282 size_t size; 1283 struct anon_map *amp2; 1284 struct vpage *new_vpage; 1285 1286 /* 1287 * We don't need any segment level locks for "segvn" data 1288 * since the address space is "write" locked. 1289 */ 1290 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1291 1292 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1293 return (-1); 1294 } 1295 1296 /* first segment is new, try to extend second */ 1297 /* XXX - should also check cred */ 1298 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1299 (!svd2->pageprot && (svd2->prot != a->prot)) || 1300 svd2->type != a->type || svd2->flags != a->flags || 1301 seg2->s_szc != a->szc) 1302 return (-1); 1303 /* vp == NULL implies zfod, offset doesn't matter */ 1304 if (svd2->vp != NULL && 1305 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1306 return (-1); 1307 1308 if (svd2->tr_state != SEGVN_TR_OFF) { 1309 return (-1); 1310 } 1311 1312 amp2 = svd2->amp; 1313 if (amp2) { 1314 pgcnt_t newpgs; 1315 1316 /* 1317 * Segment has private pages, can data structures 1318 * be expanded? 1319 * 1320 * Acquire the anon_map lock to prevent it from changing, 1321 * if it is shared. 
This ensures that the anon_map 1322 * will not change while a thread which has a read/write 1323 * lock on an address space references it. 1324 * 1325 * XXX - Don't need the anon_map lock at all if "refcnt" 1326 * is 1. 1327 */ 1328 if (svd2->type == MAP_SHARED) 1329 return (-1); 1330 1331 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1332 if (amp2->refcnt > 1) { 1333 ANON_LOCK_EXIT(&2->a_rwlock); 1334 return (-1); 1335 } 1336 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1337 btop(seg2->s_size), btop(seg1->s_size), 1338 ANON_NOSLEEP | ANON_GROWDOWN); 1339 1340 if (newpgs == 0) { 1341 ANON_LOCK_EXIT(&2->a_rwlock); 1342 return (-1); 1343 } 1344 amp2->size = ptob(newpgs); 1345 ANON_LOCK_EXIT(&2->a_rwlock); 1346 } 1347 if (svd2->vpage != NULL) { 1348 struct vpage *vp, *evp; 1349 new_vpage = 1350 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1351 KM_NOSLEEP); 1352 if (new_vpage == NULL) { 1353 /* Not merging segments so adjust anon_index back */ 1354 if (amp2) 1355 svd2->anon_index += seg_pages(seg1); 1356 return (-1); 1357 } 1358 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1359 vpgtob(seg_pages(seg2))); 1360 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1361 svd2->vpage = new_vpage; 1362 1363 vp = new_vpage; 1364 evp = vp + seg_pages(seg1); 1365 for (; vp < evp; vp++) 1366 VPP_SETPROT(vp, a->prot); 1367 } 1368 size = seg1->s_size; 1369 seg_free(seg1); 1370 seg2->s_size += size; 1371 seg2->s_base -= size; 1372 svd2->offset -= size; 1373 svd2->swresv += swresv; 1374 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1375 svd2->type == MAP_SHARED && svd2->vp != NULL && 1376 (svd2->vp->v_flag & VVMEXEC)) { 1377 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1378 segvn_inval_trcache(svd2->vp); 1379 } 1380 return (0); 1381 } 1382 1383 static int 1384 segvn_dup(struct seg *seg, struct seg *newseg) 1385 { 1386 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1387 struct segvn_data *newsvd; 1388 pgcnt_t npages = seg_pages(seg); 1389 int error = 0; 1390 uint_t prot; 1391 size_t len; 1392 struct anon_map *amp; 1393 1394 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1395 1396 /* 1397 * If segment has anon reserved, reserve more for the new seg. 1398 * For a MAP_NORESERVE segment swresv will be a count of all the 1399 * allocated anon slots; thus we reserve for the child as many slots 1400 * as the parent has allocated. This semantic prevents the child or 1401 * parent from dieing during a copy-on-write fault caused by trying 1402 * to write a shared pre-existing anon page. 
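 *
 * For context, MAP_NORESERVE is requested by the application at mmap(2)
 * time; a minimal illustrative user-level program (not part of this file)
 * follows. No swap is reserved up front for such a mapping, and swresv only
 * ever covers the anon slots that have actually been allocated as pages are
 * first touched:
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *
 *	int
 *	main(void)
 *	{
 *		size_t len = 64 * 1024 * 1024;
 *		char *p;
 *
 *		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
 *		if (p == MAP_FAILED)
 *			return (1);
 *		p[0] = 1;
 *		return (0);
 *	}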
1403 */ 1404 if ((len = svd->swresv) != 0) { 1405 if (anon_resv(svd->swresv) == 0) 1406 return (ENOMEM); 1407 1408 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1409 seg, len, 0); 1410 } 1411 1412 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1413 1414 newseg->s_ops = &segvn_ops; 1415 newseg->s_data = (void *)newsvd; 1416 newseg->s_szc = seg->s_szc; 1417 1418 newsvd->seg = newseg; 1419 if ((newsvd->vp = svd->vp) != NULL) { 1420 VN_HOLD(svd->vp); 1421 if (svd->type == MAP_SHARED) 1422 lgrp_shm_policy_init(NULL, svd->vp); 1423 } 1424 newsvd->offset = svd->offset; 1425 newsvd->prot = svd->prot; 1426 newsvd->maxprot = svd->maxprot; 1427 newsvd->pageprot = svd->pageprot; 1428 newsvd->type = svd->type; 1429 newsvd->cred = svd->cred; 1430 crhold(newsvd->cred); 1431 newsvd->advice = svd->advice; 1432 newsvd->pageadvice = svd->pageadvice; 1433 newsvd->swresv = svd->swresv; 1434 newsvd->flags = svd->flags; 1435 newsvd->softlockcnt = 0; 1436 newsvd->policy_info = svd->policy_info; 1437 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1438 1439 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1440 /* 1441 * Not attaching to a shared anon object. 1442 */ 1443 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1444 svd->tr_state == SEGVN_TR_OFF); 1445 if (svd->tr_state == SEGVN_TR_ON) { 1446 ASSERT(newsvd->vp != NULL && amp != NULL); 1447 newsvd->tr_state = SEGVN_TR_INIT; 1448 } else { 1449 newsvd->tr_state = svd->tr_state; 1450 } 1451 newsvd->amp = NULL; 1452 newsvd->anon_index = 0; 1453 } else { 1454 /* regions for now are only used on pure vnode segments */ 1455 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1456 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1457 newsvd->tr_state = SEGVN_TR_OFF; 1458 if (svd->type == MAP_SHARED) { 1459 newsvd->amp = amp; 1460 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1461 amp->refcnt++; 1462 ANON_LOCK_EXIT(&->a_rwlock); 1463 newsvd->anon_index = svd->anon_index; 1464 } else { 1465 int reclaim = 1; 1466 1467 /* 1468 * Allocate and initialize new anon_map structure. 1469 */ 1470 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1471 ANON_SLEEP); 1472 newsvd->amp->a_szc = newseg->s_szc; 1473 newsvd->anon_index = 0; 1474 1475 /* 1476 * We don't have to acquire the anon_map lock 1477 * for the new segment (since it belongs to an 1478 * address space that is still not associated 1479 * with any process), or the segment in the old 1480 * address space (since all threads in it 1481 * are stopped while duplicating the address space). 1482 */ 1483 1484 /* 1485 * The goal of the following code is to make sure that 1486 * softlocked pages do not end up as copy on write 1487 * pages. This would cause problems where one 1488 * thread writes to a page that is COW and a different 1489 * thread in the same process has softlocked it. The 1490 * softlock lock would move away from this process 1491 * because the write would cause this process to get 1492 * a copy (without the softlock). 1493 * 1494 * The strategy here is to just break the 1495 * sharing on pages that could possibly be 1496 * softlocked. 1497 */ 1498 retry: 1499 if (svd->softlockcnt) { 1500 struct anon *ap, *newap; 1501 size_t i; 1502 uint_t vpprot; 1503 page_t *anon_pl[1+1], *pp; 1504 caddr_t addr; 1505 ulong_t old_idx = svd->anon_index; 1506 ulong_t new_idx = 0; 1507 1508 /* 1509 * The softlock count might be non zero 1510 * because some pages are still stuck in the 1511 * cache for lazy reclaim. Flush the cache 1512 * now. This should drop the count to zero. 
1513 * [or there is really I/O going on to these 1514 * pages]. Note, we have the writers lock so 1515 * nothing gets inserted during the flush. 1516 */ 1517 if (reclaim == 1) { 1518 segvn_purge(seg); 1519 reclaim = 0; 1520 goto retry; 1521 } 1522 i = btopr(seg->s_size); 1523 addr = seg->s_base; 1524 /* 1525 * XXX break cow sharing using PAGESIZE 1526 * pages. They will be relocated into larger 1527 * pages at fault time. 1528 */ 1529 while (i-- > 0) { 1530 if (ap = anon_get_ptr(amp->ahp, 1531 old_idx)) { 1532 error = anon_getpage(&ap, 1533 &vpprot, anon_pl, PAGESIZE, 1534 seg, addr, S_READ, 1535 svd->cred); 1536 if (error) { 1537 newsvd->vpage = NULL; 1538 goto out; 1539 } 1540 /* 1541 * prot need not be computed 1542 * below 'cause anon_private is 1543 * going to ignore it anyway 1544 * as child doesn't inherit 1545 * pagelock from parent. 1546 */ 1547 prot = svd->pageprot ? 1548 VPP_PROT( 1549 &svd->vpage[ 1550 seg_page(seg, addr)]) 1551 : svd->prot; 1552 pp = anon_private(&newap, 1553 newseg, addr, prot, 1554 anon_pl[0], 0, 1555 newsvd->cred); 1556 if (pp == NULL) { 1557 /* no mem abort */ 1558 newsvd->vpage = NULL; 1559 error = ENOMEM; 1560 goto out; 1561 } 1562 (void) anon_set_ptr( 1563 newsvd->amp->ahp, new_idx, 1564 newap, ANON_SLEEP); 1565 page_unlock(pp); 1566 } 1567 addr += PAGESIZE; 1568 old_idx++; 1569 new_idx++; 1570 } 1571 } else { /* common case */ 1572 if (seg->s_szc != 0) { 1573 /* 1574 * If at least one of anon slots of a 1575 * large page exists then make sure 1576 * all anon slots of a large page 1577 * exist to avoid partial cow sharing 1578 * of a large page in the future. 1579 */ 1580 anon_dup_fill_holes(amp->ahp, 1581 svd->anon_index, newsvd->amp->ahp, 1582 0, seg->s_size, seg->s_szc, 1583 svd->vp != NULL); 1584 } else { 1585 anon_dup(amp->ahp, svd->anon_index, 1586 newsvd->amp->ahp, 0, seg->s_size); 1587 } 1588 1589 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1590 seg->s_size, PROT_WRITE); 1591 } 1592 } 1593 } 1594 /* 1595 * If necessary, create a vpage structure for the new segment. 1596 * Do not copy any page lock indications. 
1597 */ 1598 if (svd->vpage != NULL) { 1599 uint_t i; 1600 struct vpage *ovp = svd->vpage; 1601 struct vpage *nvp; 1602 1603 nvp = newsvd->vpage = 1604 kmem_alloc(vpgtob(npages), KM_SLEEP); 1605 for (i = 0; i < npages; i++) { 1606 *nvp = *ovp++; 1607 VPP_CLRPPLOCK(nvp++); 1608 } 1609 } else 1610 newsvd->vpage = NULL; 1611 1612 /* Inform the vnode of the new mapping */ 1613 if (newsvd->vp != NULL) { 1614 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1615 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1616 newsvd->maxprot, newsvd->type, newsvd->cred); 1617 } 1618 out: 1619 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1620 ASSERT(newsvd->amp == NULL); 1621 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1622 newsvd->rcookie = svd->rcookie; 1623 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1624 } 1625 return (error); 1626 } 1627 1628 1629 /* 1630 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1631 * those pages actually processed by the HAT 1632 */ 1633 extern int free_pages; 1634 1635 static void 1636 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1637 size_t r_size, void *r_obj, u_offset_t r_objoff) 1638 { 1639 u_offset_t off; 1640 size_t len; 1641 vnode_t *vp = (vnode_t *)r_obj; 1642 1643 ASSERT(eaddr > saddr); 1644 ASSERT(saddr >= r_saddr); 1645 ASSERT(saddr < r_saddr + r_size); 1646 ASSERT(eaddr > r_saddr); 1647 ASSERT(eaddr <= r_saddr + r_size); 1648 ASSERT(vp != NULL); 1649 1650 if (!free_pages) { 1651 return; 1652 } 1653 1654 len = eaddr - saddr; 1655 off = (saddr - r_saddr) + r_objoff; 1656 free_vp_pages(vp, off, len); 1657 } 1658 1659 static void 1660 segvn_hat_unload_callback(hat_callback_t *cb) 1661 { 1662 struct seg *seg = cb->hcb_data; 1663 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1664 size_t len; 1665 u_offset_t off; 1666 1667 ASSERT(svd->vp != NULL); 1668 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1669 ASSERT(cb->hcb_start_addr >= seg->s_base); 1670 1671 len = cb->hcb_end_addr - cb->hcb_start_addr; 1672 off = cb->hcb_start_addr - seg->s_base; 1673 free_vp_pages(svd->vp, svd->offset + off, len); 1674 } 1675 1676 static int 1677 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1678 { 1679 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1680 struct segvn_data *nsvd; 1681 struct seg *nseg; 1682 struct anon_map *amp; 1683 pgcnt_t opages; /* old segment size in pages */ 1684 pgcnt_t npages; /* new segment size in pages */ 1685 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1686 hat_callback_t callback; /* used for free_vp_pages() */ 1687 hat_callback_t *cbp = NULL; 1688 caddr_t nbase; 1689 size_t nsize; 1690 size_t oswresv; 1691 int reclaim = 1; 1692 1693 /* 1694 * We don't need any segment level locks for "segvn" data 1695 * since the address space is "write" locked. 1696 */ 1697 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1698 1699 /* 1700 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1701 * softlockcnt is protected from change by the as write lock. 1702 */ 1703 retry: 1704 if (svd->softlockcnt > 0) { 1705 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1706 /* 1707 * since we do have the writers lock nobody can fill 1708 * the cache during the purge. The flush either succeeds 1709 * or we still have pending I/Os. 
1710 */ 1711 if (reclaim == 1) { 1712 segvn_purge(seg); 1713 reclaim = 0; 1714 goto retry; 1715 } 1716 return (EAGAIN); 1717 } 1718 1719 /* 1720 * Check for bad sizes 1721 */ 1722 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1723 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1724 panic("segvn_unmap"); 1725 /*NOTREACHED*/ 1726 } 1727 1728 if (seg->s_szc != 0) { 1729 size_t pgsz = page_get_pagesize(seg->s_szc); 1730 int err; 1731 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1732 ASSERT(seg->s_base != addr || seg->s_size != len); 1733 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1734 ASSERT(svd->amp == NULL); 1735 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1736 hat_leave_region(seg->s_as->a_hat, 1737 svd->rcookie, HAT_REGION_TEXT); 1738 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1739 /* 1740 * could pass a flag to segvn_demote_range() 1741 * below to tell it not to do any unloads but 1742 * this case is rare enough to not bother for 1743 * now. 1744 */ 1745 } else if (svd->tr_state == SEGVN_TR_INIT) { 1746 svd->tr_state = SEGVN_TR_OFF; 1747 } else if (svd->tr_state == SEGVN_TR_ON) { 1748 ASSERT(svd->amp != NULL); 1749 segvn_textunrepl(seg, 1); 1750 ASSERT(svd->amp == NULL); 1751 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1752 } 1753 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1754 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1755 if (err == 0) { 1756 return (IE_RETRY); 1757 } 1758 return (err); 1759 } 1760 } 1761 1762 /* Inform the vnode of the unmapping. */ 1763 if (svd->vp) { 1764 int error; 1765 1766 error = VOP_DELMAP(svd->vp, 1767 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1768 seg->s_as, addr, len, svd->prot, svd->maxprot, 1769 svd->type, svd->cred); 1770 1771 if (error == EAGAIN) 1772 return (error); 1773 } 1774 1775 /* 1776 * Remove any page locks set through this mapping. 1777 * If text replication is not off no page locks could have been 1778 * established via this mapping. 1779 */ 1780 if (svd->tr_state == SEGVN_TR_OFF) { 1781 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1782 } 1783 1784 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1785 ASSERT(svd->amp == NULL); 1786 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1787 ASSERT(svd->type == MAP_PRIVATE); 1788 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1789 HAT_REGION_TEXT); 1790 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1791 } else if (svd->tr_state == SEGVN_TR_ON) { 1792 ASSERT(svd->amp != NULL); 1793 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1794 segvn_textunrepl(seg, 1); 1795 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1796 } else { 1797 if (svd->tr_state != SEGVN_TR_OFF) { 1798 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1799 svd->tr_state = SEGVN_TR_OFF; 1800 } 1801 /* 1802 * Unload any hardware translations in the range to be taken 1803 * out. Use a callback to invoke free_vp_pages() effectively. 
1804 */ 1805 if (svd->vp != NULL && free_pages != 0) { 1806 callback.hcb_data = seg; 1807 callback.hcb_function = segvn_hat_unload_callback; 1808 cbp = &callback; 1809 } 1810 hat_unload_callback(seg->s_as->a_hat, addr, len, 1811 HAT_UNLOAD_UNMAP, cbp); 1812 1813 if (svd->type == MAP_SHARED && svd->vp != NULL && 1814 (svd->vp->v_flag & VVMEXEC) && 1815 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1816 segvn_inval_trcache(svd->vp); 1817 } 1818 } 1819 1820 /* 1821 * Check for entire segment 1822 */ 1823 if (addr == seg->s_base && len == seg->s_size) { 1824 seg_free(seg); 1825 return (0); 1826 } 1827 1828 opages = seg_pages(seg); 1829 dpages = btop(len); 1830 npages = opages - dpages; 1831 amp = svd->amp; 1832 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1833 1834 /* 1835 * Check for beginning of segment 1836 */ 1837 if (addr == seg->s_base) { 1838 if (svd->vpage != NULL) { 1839 size_t nbytes; 1840 struct vpage *ovpage; 1841 1842 ovpage = svd->vpage; /* keep pointer to vpage */ 1843 1844 nbytes = vpgtob(npages); 1845 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1846 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1847 1848 /* free up old vpage */ 1849 kmem_free(ovpage, vpgtob(opages)); 1850 } 1851 if (amp != NULL) { 1852 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1853 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1854 /* 1855 * Free up now unused parts of anon_map array. 1856 */ 1857 if (amp->a_szc == seg->s_szc) { 1858 if (seg->s_szc != 0) { 1859 anon_free_pages(amp->ahp, 1860 svd->anon_index, len, 1861 seg->s_szc); 1862 } else { 1863 anon_free(amp->ahp, 1864 svd->anon_index, 1865 len); 1866 } 1867 } else { 1868 ASSERT(svd->type == MAP_SHARED); 1869 ASSERT(amp->a_szc > seg->s_szc); 1870 anon_shmap_free_pages(amp, 1871 svd->anon_index, len); 1872 } 1873 1874 /* 1875 * Unreserve swap space for the 1876 * unmapped chunk of this segment in 1877 * case it's MAP_SHARED 1878 */ 1879 if (svd->type == MAP_SHARED) { 1880 anon_unresv(len); 1881 amp->swresv -= len; 1882 } 1883 } 1884 ANON_LOCK_EXIT(&->a_rwlock); 1885 svd->anon_index += dpages; 1886 } 1887 if (svd->vp != NULL) 1888 svd->offset += len; 1889 1890 if (svd->swresv) { 1891 if (svd->flags & MAP_NORESERVE) { 1892 ASSERT(amp); 1893 oswresv = svd->swresv; 1894 1895 svd->swresv = ptob(anon_pages(amp->ahp, 1896 svd->anon_index, npages)); 1897 anon_unresv(oswresv - svd->swresv); 1898 } else { 1899 anon_unresv(len); 1900 svd->swresv -= len; 1901 } 1902 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1903 seg, len, 0); 1904 } 1905 1906 seg->s_base += len; 1907 seg->s_size -= len; 1908 return (0); 1909 } 1910 1911 /* 1912 * Check for end of segment 1913 */ 1914 if (addr + len == seg->s_base + seg->s_size) { 1915 if (svd->vpage != NULL) { 1916 size_t nbytes; 1917 struct vpage *ovpage; 1918 1919 ovpage = svd->vpage; /* keep pointer to vpage */ 1920 1921 nbytes = vpgtob(npages); 1922 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1923 bcopy(ovpage, svd->vpage, nbytes); 1924 1925 /* free up old vpage */ 1926 kmem_free(ovpage, vpgtob(opages)); 1927 1928 } 1929 if (amp != NULL) { 1930 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1931 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1932 /* 1933 * Free up now unused parts of anon_map array. 
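 *
 * Worked example for this end-of-segment case (illustrative numbers,
 * assuming 8K pages): trimming the last 512K off a 1M segment gives
 * opages = 128, dpages = 64 and npages = 64, so anon slots
 * [anon_index + 64, anon_index + 128) are freed here, and for a
 * MAP_NORESERVE segment the swap reservation below shrinks to
 * ptob(anon_pages()) over the remaining 64 slots.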
1934 */ 1935 ulong_t an_idx = svd->anon_index + npages; 1936 if (amp->a_szc == seg->s_szc) { 1937 if (seg->s_szc != 0) { 1938 anon_free_pages(amp->ahp, 1939 an_idx, len, 1940 seg->s_szc); 1941 } else { 1942 anon_free(amp->ahp, an_idx, 1943 len); 1944 } 1945 } else { 1946 ASSERT(svd->type == MAP_SHARED); 1947 ASSERT(amp->a_szc > seg->s_szc); 1948 anon_shmap_free_pages(amp, 1949 an_idx, len); 1950 } 1951 1952 /* 1953 * Unreserve swap space for the 1954 * unmapped chunk of this segment in 1955 * case it's MAP_SHARED 1956 */ 1957 if (svd->type == MAP_SHARED) { 1958 anon_unresv(len); 1959 amp->swresv -= len; 1960 } 1961 } 1962 ANON_LOCK_EXIT(&->a_rwlock); 1963 } 1964 1965 if (svd->swresv) { 1966 if (svd->flags & MAP_NORESERVE) { 1967 ASSERT(amp); 1968 oswresv = svd->swresv; 1969 svd->swresv = ptob(anon_pages(amp->ahp, 1970 svd->anon_index, npages)); 1971 anon_unresv(oswresv - svd->swresv); 1972 } else { 1973 anon_unresv(len); 1974 svd->swresv -= len; 1975 } 1976 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 1977 "anon proc:%p %lu %u", seg, len, 0); 1978 } 1979 1980 seg->s_size -= len; 1981 return (0); 1982 } 1983 1984 /* 1985 * The section to go is in the middle of the segment, 1986 * have to make it into two segments. nseg is made for 1987 * the high end while seg is cut down at the low end. 1988 */ 1989 nbase = addr + len; /* new seg base */ 1990 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 1991 seg->s_size = addr - seg->s_base; /* shrink old seg */ 1992 nseg = seg_alloc(seg->s_as, nbase, nsize); 1993 if (nseg == NULL) { 1994 panic("segvn_unmap seg_alloc"); 1995 /*NOTREACHED*/ 1996 } 1997 nseg->s_ops = seg->s_ops; 1998 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1999 nseg->s_data = (void *)nsvd; 2000 nseg->s_szc = seg->s_szc; 2001 *nsvd = *svd; 2002 nsvd->seg = nseg; 2003 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2004 nsvd->swresv = 0; 2005 nsvd->softlockcnt = 0; 2006 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2007 2008 if (svd->vp != NULL) { 2009 VN_HOLD(nsvd->vp); 2010 if (nsvd->type == MAP_SHARED) 2011 lgrp_shm_policy_init(NULL, nsvd->vp); 2012 } 2013 crhold(svd->cred); 2014 2015 if (svd->vpage == NULL) { 2016 nsvd->vpage = NULL; 2017 } else { 2018 /* need to split vpage into two arrays */ 2019 size_t nbytes; 2020 struct vpage *ovpage; 2021 2022 ovpage = svd->vpage; /* keep pointer to vpage */ 2023 2024 npages = seg_pages(seg); /* seg has shrunk */ 2025 nbytes = vpgtob(npages); 2026 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2027 2028 bcopy(ovpage, svd->vpage, nbytes); 2029 2030 npages = seg_pages(nseg); 2031 nbytes = vpgtob(npages); 2032 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2033 2034 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2035 2036 /* free up old vpage */ 2037 kmem_free(ovpage, vpgtob(opages)); 2038 } 2039 2040 if (amp == NULL) { 2041 nsvd->amp = NULL; 2042 nsvd->anon_index = 0; 2043 } else { 2044 /* 2045 * Need to create a new anon map for the new segment. 2046 * We'll also allocate a new smaller array for the old 2047 * smaller segment to save space. 2048 */ 2049 opages = btop((uintptr_t)(addr - seg->s_base)); 2050 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2051 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2052 /* 2053 * Free up now unused parts of anon_map array. 
2054 */ 2055 ulong_t an_idx = svd->anon_index + opages; 2056 if (amp->a_szc == seg->s_szc) { 2057 if (seg->s_szc != 0) { 2058 anon_free_pages(amp->ahp, an_idx, len, 2059 seg->s_szc); 2060 } else { 2061 anon_free(amp->ahp, an_idx, 2062 len); 2063 } 2064 } else { 2065 ASSERT(svd->type == MAP_SHARED); 2066 ASSERT(amp->a_szc > seg->s_szc); 2067 anon_shmap_free_pages(amp, an_idx, len); 2068 } 2069 2070 /* 2071 * Unreserve swap space for the 2072 * unmapped chunk of this segment in 2073 * case it's MAP_SHARED 2074 */ 2075 if (svd->type == MAP_SHARED) { 2076 anon_unresv(len); 2077 amp->swresv -= len; 2078 } 2079 } 2080 nsvd->anon_index = svd->anon_index + 2081 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2082 if (svd->type == MAP_SHARED) { 2083 amp->refcnt++; 2084 nsvd->amp = amp; 2085 } else { 2086 struct anon_map *namp; 2087 struct anon_hdr *nahp; 2088 2089 ASSERT(svd->type == MAP_PRIVATE); 2090 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2091 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2092 namp->a_szc = seg->s_szc; 2093 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2094 0, btop(seg->s_size), ANON_SLEEP); 2095 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2096 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2097 anon_release(amp->ahp, btop(amp->size)); 2098 svd->anon_index = 0; 2099 nsvd->anon_index = 0; 2100 amp->ahp = nahp; 2101 amp->size = seg->s_size; 2102 nsvd->amp = namp; 2103 } 2104 ANON_LOCK_EXIT(&->a_rwlock); 2105 } 2106 if (svd->swresv) { 2107 if (svd->flags & MAP_NORESERVE) { 2108 ASSERT(amp); 2109 oswresv = svd->swresv; 2110 svd->swresv = ptob(anon_pages(amp->ahp, 2111 svd->anon_index, btop(seg->s_size))); 2112 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2113 nsvd->anon_index, btop(nseg->s_size))); 2114 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2115 anon_unresv(oswresv - (svd->swresv + nsvd->swresv)); 2116 } else { 2117 if (seg->s_size + nseg->s_size + len != svd->swresv) { 2118 panic("segvn_unmap: " 2119 "cannot split swap reservation"); 2120 /*NOTREACHED*/ 2121 } 2122 anon_unresv(len); 2123 svd->swresv = seg->s_size; 2124 nsvd->swresv = nseg->s_size; 2125 } 2126 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2127 seg, len, 0); 2128 } 2129 2130 return (0); /* I'm glad that's all over with! */ 2131 } 2132 2133 static void 2134 segvn_free(struct seg *seg) 2135 { 2136 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2137 pgcnt_t npages = seg_pages(seg); 2138 struct anon_map *amp; 2139 size_t len; 2140 2141 /* 2142 * We don't need any segment level locks for "segvn" data 2143 * since the address space is "write" locked. 2144 */ 2145 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2146 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2147 2148 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2149 2150 /* 2151 * Be sure to unlock pages. XXX Why do things get free'ed instead 2152 * of unmapped? XXX 2153 */ 2154 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2155 0, MC_UNLOCK, NULL, 0); 2156 2157 /* 2158 * Deallocate the vpage and anon pointers if necessary and possible. 2159 */ 2160 if (svd->vpage != NULL) { 2161 kmem_free(svd->vpage, vpgtob(npages)); 2162 svd->vpage = NULL; 2163 } 2164 if ((amp = svd->amp) != NULL) { 2165 /* 2166 * If there are no more references to this anon_map 2167 * structure, then deallocate the structure after freeing 2168 * up all the anon slot pointers that we can. 
2169 */
2170 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2171 ASSERT(amp->a_szc >= seg->s_szc);
2172 if (--amp->refcnt == 0) {
2173 if (svd->type == MAP_PRIVATE) {
2174 /*
2175 * Private - we only need to anon_free
2176 * the part that this segment refers to.
2177 */
2178 if (seg->s_szc != 0) {
2179 anon_free_pages(amp->ahp,
2180 svd->anon_index, seg->s_size,
2181 seg->s_szc);
2182 } else {
2183 anon_free(amp->ahp, svd->anon_index,
2184 seg->s_size);
2185 }
2186 } else {
2187 /*
2188 * Shared - anon_free the entire
2189 * anon_map's worth of stuff and
2190 * release any swap reservation.
2191 */
2192 if (amp->a_szc != 0) {
2193 anon_shmap_free_pages(amp, 0,
2194 amp->size);
2195 } else {
2196 anon_free(amp->ahp, 0, amp->size);
2197 }
2198 if ((len = amp->swresv) != 0) {
2199 anon_unresv(len);
2200 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2201 "anon proc:%p %lu %u",
2202 seg, len, 0);
2203 }
2204 }
2205 svd->amp = NULL;
2206 ANON_LOCK_EXIT(&amp->a_rwlock);
2207 anonmap_free(amp);
2208 } else if (svd->type == MAP_PRIVATE) {
2209 /*
2210 * We had a private mapping which still has
2211 * a held anon_map so just free up all the
2212 * anon slot pointers that we were using.
2213 */
2214 if (seg->s_szc != 0) {
2215 anon_free_pages(amp->ahp, svd->anon_index,
2216 seg->s_size, seg->s_szc);
2217 } else {
2218 anon_free(amp->ahp, svd->anon_index,
2219 seg->s_size);
2220 }
2221 ANON_LOCK_EXIT(&amp->a_rwlock);
2222 } else {
2223 ANON_LOCK_EXIT(&amp->a_rwlock);
2224 }
2225 }
2226
2227 /*
2228 * Release swap reservation.
2229 */
2230 if ((len = svd->swresv) != 0) {
2231 anon_unresv(svd->swresv);
2232 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2233 seg, len, 0);
2234 svd->swresv = 0;
2235 }
2236 /*
2237 * Release claim on vnode, credentials, and finally free the
2238 * private data.
2239 */
2240 if (svd->vp != NULL) {
2241 if (svd->type == MAP_SHARED)
2242 lgrp_shm_policy_fini(NULL, svd->vp);
2243 VN_RELE(svd->vp);
2244 svd->vp = NULL;
2245 }
2246 crfree(svd->cred);
2247 svd->cred = NULL;
2248
2249 seg->s_data = NULL;
2250 kmem_cache_free(segvn_cache, svd);
2251 }
2252
2253 ulong_t segvn_lpglck_limit = 0;
2254 /*
2255 * Support routines used by segvn_pagelock() and softlock faults for anonymous
2256 * pages to implement availrmem accounting in a way that makes sure the
2257 * same memory is accounted just once for all softlock/pagelock purposes.
2258 * This prevents a bug when availrmem is quickly incorrectly exhausted from
2259 * several pagelocks to different parts of the same large page since each
2260 * pagelock has to decrement availrmem by the size of the entire large
2261 * page. Note those pages are not COW shared until softunlock/pageunlock so
2262 * we don't need to use cow style accounting here. We also need to make sure
2263 * the entire large page is accounted even if softlock range is less than the
2264 * entire large page because large anon pages can't be demoted when any of
2265 * constituent pages is locked. The caller calls this routine for every page_t
2266 * it locks. The very first page in the range may not be the root page of a
2267 * large page. For all other pages it's guaranteed we are going to visit the
2268 * root of a particular large page before any other constituent page as we are
2269 * locking sequential pages belonging to the same anon map. So we do all the
2270 * locking when the root is encountered except for the very first page.
Since 2271 * softlocking is not supported (except S_READ_NOCOW special case) for vmpss 2272 * segments and since vnode pages can be demoted without locking all 2273 * constituent pages vnode pages don't come here. Unlocking relies on the 2274 * fact that pagesize can't change whenever any of constituent large pages is 2275 * locked at least SE_SHARED. This allows unlocking code to find the right 2276 * root and decrement availrmem by the same amount it was incremented when the 2277 * page was locked. 2278 */ 2279 static int 2280 segvn_pp_lock_anonpages(page_t *pp, int first) 2281 { 2282 pgcnt_t pages; 2283 pfn_t pfn; 2284 uchar_t szc = pp->p_szc; 2285 2286 ASSERT(PAGE_LOCKED(pp)); 2287 ASSERT(pp->p_vnode != NULL); 2288 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2289 2290 /* 2291 * pagesize won't change as long as any constituent page is locked. 2292 */ 2293 pages = page_get_pagecnt(pp->p_szc); 2294 pfn = page_pptonum(pp); 2295 2296 if (!first) { 2297 if (!IS_P2ALIGNED(pfn, pages)) { 2298 #ifdef DEBUG 2299 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2300 pfn = page_pptonum(pp); 2301 ASSERT(IS_P2ALIGNED(pfn, pages)); 2302 ASSERT(pp->p_szc == szc); 2303 ASSERT(pp->p_vnode != NULL); 2304 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2305 ASSERT(pp->p_slckcnt != 0); 2306 #endif /* DEBUG */ 2307 return (1); 2308 } 2309 } else if (!IS_P2ALIGNED(pfn, pages)) { 2310 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2311 #ifdef DEBUG 2312 pfn = page_pptonum(pp); 2313 ASSERT(IS_P2ALIGNED(pfn, pages)); 2314 ASSERT(pp->p_szc == szc); 2315 ASSERT(pp->p_vnode != NULL); 2316 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2317 #endif /* DEBUG */ 2318 } 2319 2320 /* 2321 * pp is a root page. 2322 * We haven't locked this large page yet. 2323 */ 2324 page_struct_lock(pp); 2325 if (pp->p_slckcnt != 0) { 2326 if (pp->p_slckcnt < PAGE_SLOCK_MAXIMUM) { 2327 pp->p_slckcnt++; 2328 page_struct_unlock(pp); 2329 return (1); 2330 } 2331 page_struct_unlock(pp); 2332 segvn_lpglck_limit++; 2333 return (0); 2334 } 2335 mutex_enter(&freemem_lock); 2336 if (availrmem < tune.t_minarmem + pages) { 2337 mutex_exit(&freemem_lock); 2338 page_struct_unlock(pp); 2339 return (0); 2340 } 2341 pp->p_slckcnt++; 2342 availrmem -= pages; 2343 mutex_exit(&freemem_lock); 2344 page_struct_unlock(pp); 2345 return (1); 2346 } 2347 2348 static void 2349 segvn_pp_unlock_anonpages(page_t *pp, int first) 2350 { 2351 pgcnt_t pages; 2352 pfn_t pfn; 2353 2354 ASSERT(PAGE_LOCKED(pp)); 2355 ASSERT(pp->p_vnode != NULL); 2356 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2357 2358 /* 2359 * pagesize won't change as long as any constituent page is locked. 2360 */ 2361 pages = page_get_pagecnt(pp->p_szc); 2362 pfn = page_pptonum(pp); 2363 2364 if (!first) { 2365 if (!IS_P2ALIGNED(pfn, pages)) { 2366 return; 2367 } 2368 } else if (!IS_P2ALIGNED(pfn, pages)) { 2369 pp = &pp[-(spgcnt_t)(pfn & (pages - 1))]; 2370 #ifdef DEBUG 2371 pfn = page_pptonum(pp); 2372 ASSERT(IS_P2ALIGNED(pfn, pages)); 2373 #endif /* DEBUG */ 2374 } 2375 ASSERT(pp->p_vnode != NULL); 2376 ASSERT(IS_SWAPFSVP(pp->p_vnode)); 2377 ASSERT(pp->p_slckcnt != 0); 2378 page_struct_lock(pp); 2379 if (--pp->p_slckcnt == 0) { 2380 mutex_enter(&freemem_lock); 2381 availrmem += pages; 2382 mutex_exit(&freemem_lock); 2383 } 2384 page_struct_unlock(pp); 2385 } 2386 2387 /* 2388 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2389 * already been F_SOFTLOCK'ed. 2390 * Caller must always match addr and len of a softunlock with a previous 2391 * softlock with exactly the same addr and len. 
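 *
 * A minimal caller-side sketch of that pairing (illustrative only: callers
 * normally reach this code through as_fault() rather than by calling segvn
 * directly, and do_locked_copy() below is a made-up placeholder for
 * whatever work needs the range held):
 *
 *        if (as_fault(as->a_hat, as, addr, len, F_SOFTLOCK, S_READ) == 0) {
 *                do_locked_copy(addr, len);
 *                (void) as_fault(as->a_hat, as, addr, len,
 *                    F_SOFTUNLOCK, S_READ);
 *        }
 *
 * The second as_fault() must use the identical addr and len that were
 * softlocked, per the rule above.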
2392 */ 2393 static void 2394 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2395 { 2396 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2397 page_t *pp; 2398 caddr_t adr; 2399 struct vnode *vp; 2400 u_offset_t offset; 2401 ulong_t anon_index; 2402 struct anon_map *amp; 2403 struct anon *ap = NULL; 2404 2405 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2406 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2407 2408 if ((amp = svd->amp) != NULL) 2409 anon_index = svd->anon_index + seg_page(seg, addr); 2410 2411 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2412 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2413 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2414 } else { 2415 hat_unlock(seg->s_as->a_hat, addr, len); 2416 } 2417 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2418 if (amp != NULL) { 2419 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2420 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2421 != NULL) { 2422 swap_xlate(ap, &vp, &offset); 2423 } else { 2424 vp = svd->vp; 2425 offset = svd->offset + 2426 (uintptr_t)(adr - seg->s_base); 2427 } 2428 ANON_LOCK_EXIT(&->a_rwlock); 2429 } else { 2430 vp = svd->vp; 2431 offset = svd->offset + 2432 (uintptr_t)(adr - seg->s_base); 2433 } 2434 2435 /* 2436 * Use page_find() instead of page_lookup() to 2437 * find the page since we know that it is locked. 2438 */ 2439 pp = page_find(vp, offset); 2440 if (pp == NULL) { 2441 panic( 2442 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2443 (void *)adr, (void *)ap, (void *)vp, offset); 2444 /*NOTREACHED*/ 2445 } 2446 2447 if (rw == S_WRITE) { 2448 hat_setrefmod(pp); 2449 if (seg->s_as->a_vbits) 2450 hat_setstat(seg->s_as, adr, PAGESIZE, 2451 P_REF | P_MOD); 2452 } else if (rw != S_OTHER) { 2453 hat_setref(pp); 2454 if (seg->s_as->a_vbits) 2455 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2456 } 2457 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2458 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2459 if (svd->vp == NULL) { 2460 segvn_pp_unlock_anonpages(pp, adr == addr); 2461 } 2462 page_unlock(pp); 2463 } 2464 mutex_enter(&freemem_lock); /* for availrmem */ 2465 if (svd->vp != NULL) { 2466 availrmem += btop(len); 2467 } 2468 segvn_pages_locked -= btop(len); 2469 svd->softlockcnt -= btop(len); 2470 mutex_exit(&freemem_lock); 2471 if (svd->softlockcnt == 0) { 2472 /* 2473 * All SOFTLOCKS are gone. Wakeup any waiting 2474 * unmappers so they can try again to unmap. 2475 * Check for waiters first without the mutex 2476 * held so we don't always grab the mutex on 2477 * softunlocks. 2478 */ 2479 if (AS_ISUNMAPWAIT(seg->s_as)) { 2480 mutex_enter(&seg->s_as->a_contents); 2481 if (AS_ISUNMAPWAIT(seg->s_as)) { 2482 AS_CLRUNMAPWAIT(seg->s_as); 2483 cv_broadcast(&seg->s_as->a_cv); 2484 } 2485 mutex_exit(&seg->s_as->a_contents); 2486 } 2487 } 2488 } 2489 2490 #define PAGE_HANDLED ((page_t *)-1) 2491 2492 /* 2493 * Release all the pages in the NULL terminated ppp list 2494 * which haven't already been converted to PAGE_HANDLED. 2495 */ 2496 static void 2497 segvn_pagelist_rele(page_t **ppp) 2498 { 2499 for (; *ppp != NULL; ppp++) { 2500 if (*ppp != PAGE_HANDLED) 2501 page_unlock(*ppp); 2502 } 2503 } 2504 2505 static int stealcow = 1; 2506 2507 /* 2508 * Workaround for viking chip bug. See bug id 1220902. 2509 * To fix this down in pagefault() would require importing so 2510 * much as and segvn code as to be unmaintainable. 
2511 */ 2512 int enable_mbit_wa = 0; 2513 2514 /* 2515 * Handles all the dirty work of getting the right 2516 * anonymous pages and loading up the translations. 2517 * This routine is called only from segvn_fault() 2518 * when looping over the range of addresses requested. 2519 * 2520 * The basic algorithm here is: 2521 * If this is an anon_zero case 2522 * Call anon_zero to allocate page 2523 * Load up translation 2524 * Return 2525 * endif 2526 * If this is an anon page 2527 * Use anon_getpage to get the page 2528 * else 2529 * Find page in pl[] list passed in 2530 * endif 2531 * If not a cow 2532 * Load up the translation to the page 2533 * return 2534 * endif 2535 * Call anon_private to handle cow 2536 * Load up (writable) translation to new page 2537 */ 2538 static faultcode_t 2539 segvn_faultpage( 2540 struct hat *hat, /* the hat to use for mapping */ 2541 struct seg *seg, /* seg_vn of interest */ 2542 caddr_t addr, /* address in as */ 2543 u_offset_t off, /* offset in vp */ 2544 struct vpage *vpage, /* pointer to vpage for vp, off */ 2545 page_t *pl[], /* object source page pointer */ 2546 uint_t vpprot, /* access allowed to object pages */ 2547 enum fault_type type, /* type of fault */ 2548 enum seg_rw rw, /* type of access at fault */ 2549 int brkcow, /* we may need to break cow */ 2550 int first) /* first page for this fault if 1 */ 2551 { 2552 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2553 page_t *pp, **ppp; 2554 uint_t pageflags = 0; 2555 page_t *anon_pl[1 + 1]; 2556 page_t *opp = NULL; /* original page */ 2557 uint_t prot; 2558 int err; 2559 int cow; 2560 int claim; 2561 int steal = 0; 2562 ulong_t anon_index; 2563 struct anon *ap, *oldap; 2564 struct anon_map *amp; 2565 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2566 int anon_lock = 0; 2567 anon_sync_obj_t cookie; 2568 2569 if (svd->flags & MAP_TEXT) { 2570 hat_flag |= HAT_LOAD_TEXT; 2571 } 2572 2573 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2574 ASSERT(seg->s_szc == 0); 2575 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2576 2577 /* 2578 * Initialize protection value for this page. 2579 * If we have per page protection values check it now. 2580 */ 2581 if (svd->pageprot) { 2582 uint_t protchk; 2583 2584 switch (rw) { 2585 case S_READ: 2586 protchk = PROT_READ; 2587 break; 2588 case S_WRITE: 2589 protchk = PROT_WRITE; 2590 break; 2591 case S_EXEC: 2592 protchk = PROT_EXEC; 2593 break; 2594 case S_OTHER: 2595 default: 2596 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2597 break; 2598 } 2599 2600 prot = VPP_PROT(vpage); 2601 if ((prot & protchk) == 0) 2602 return (FC_PROT); /* illegal access type */ 2603 } else { 2604 prot = svd->prot; 2605 } 2606 2607 if (type == F_SOFTLOCK && svd->vp != NULL) { 2608 mutex_enter(&freemem_lock); 2609 if (availrmem <= tune.t_minarmem) { 2610 mutex_exit(&freemem_lock); 2611 return (FC_MAKE_ERR(ENOMEM)); /* out of real memory */ 2612 } else { 2613 availrmem--; 2614 svd->softlockcnt++; 2615 segvn_pages_locked++; 2616 } 2617 mutex_exit(&freemem_lock); 2618 } 2619 2620 /* 2621 * Always acquire the anon array lock to prevent 2 threads from 2622 * allocating separate anon slots for the same "addr". 
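 *
 * In outline, the per-slot serialization used below looks like this
 * (a sketch of the code that follows, not additional logic):
 *
 *        anon_array_enter(amp, anon_index, &cookie);
 *        if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
 *                pp = anon_zero(seg, addr, &ap, svd->cred);
 *                (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
 *        }
 *        ...
 *        anon_array_exit(&cookie);
 *
 * Only one thread at a time can be between enter and exit for a given
 * slot, so a second thread faulting on the same address finds the slot
 * already populated instead of allocating a duplicate anon page.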
2623 */ 2624 2625 if ((amp = svd->amp) != NULL) { 2626 ASSERT(RW_READ_HELD(&->a_rwlock)); 2627 anon_index = svd->anon_index + seg_page(seg, addr); 2628 anon_array_enter(amp, anon_index, &cookie); 2629 anon_lock = 1; 2630 } 2631 2632 if (svd->vp == NULL && amp != NULL) { 2633 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2634 /* 2635 * Allocate a (normally) writable anonymous page of 2636 * zeroes. If no advance reservations, reserve now. 2637 */ 2638 if (svd->flags & MAP_NORESERVE) { 2639 if (anon_resv_zone(ptob(1), 2640 seg->s_as->a_proc->p_zone)) { 2641 atomic_add_long(&svd->swresv, ptob(1)); 2642 } else { 2643 err = ENOMEM; 2644 goto out; 2645 } 2646 } 2647 if ((pp = anon_zero(seg, addr, &ap, 2648 svd->cred)) == NULL) { 2649 err = ENOMEM; 2650 goto out; /* out of swap space */ 2651 } 2652 /* 2653 * Re-acquire the anon_map lock and 2654 * initialize the anon array entry. 2655 */ 2656 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2657 ANON_SLEEP); 2658 2659 ASSERT(pp->p_szc == 0); 2660 2661 /* 2662 * Handle pages that have been marked for migration 2663 */ 2664 if (lgrp_optimizations()) 2665 page_migrate(seg, addr, &pp, 1); 2666 2667 if (type == F_SOFTLOCK) { 2668 if (!segvn_pp_lock_anonpages(pp, first)) { 2669 page_unlock(pp); 2670 err = ENOMEM; 2671 goto out; 2672 } else { 2673 mutex_enter(&freemem_lock); 2674 svd->softlockcnt++; 2675 segvn_pages_locked++; 2676 mutex_exit(&freemem_lock); 2677 } 2678 } 2679 2680 if (enable_mbit_wa) { 2681 if (rw == S_WRITE) 2682 hat_setmod(pp); 2683 else if (!hat_ismod(pp)) 2684 prot &= ~PROT_WRITE; 2685 } 2686 /* 2687 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2688 * with MC_LOCKAS, MCL_FUTURE) and this is a 2689 * MAP_NORESERVE segment, we may need to 2690 * permanently lock the page as it is being faulted 2691 * for the first time. The following text applies 2692 * only to MAP_NORESERVE segments: 2693 * 2694 * As per memcntl(2), if this segment was created 2695 * after MCL_FUTURE was applied (a "future" 2696 * segment), its pages must be locked. If this 2697 * segment existed at MCL_FUTURE application (a 2698 * "past" segment), the interface is unclear. 2699 * 2700 * We decide to lock only if vpage is present: 2701 * 2702 * - "future" segments will have a vpage array (see 2703 * as_map), and so will be locked as required 2704 * 2705 * - "past" segments may not have a vpage array, 2706 * depending on whether events (such as 2707 * mprotect) have occurred. Locking if vpage 2708 * exists will preserve legacy behavior. Not 2709 * locking if vpage is absent, will not break 2710 * the interface or legacy behavior. Note that 2711 * allocating vpage here if it's absent requires 2712 * upgrading the segvn reader lock, the cost of 2713 * which does not seem worthwhile. 2714 * 2715 * Usually testing and setting VPP_ISPPLOCK and 2716 * VPP_SETPPLOCK requires holding the segvn lock as 2717 * writer, but in this case all readers are 2718 * serializing on the anon array lock. 
2719 */ 2720 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2721 (svd->flags & MAP_NORESERVE) && 2722 !VPP_ISPPLOCK(vpage)) { 2723 proc_t *p = seg->s_as->a_proc; 2724 ASSERT(svd->type == MAP_PRIVATE); 2725 mutex_enter(&p->p_lock); 2726 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2727 1) == 0) { 2728 claim = VPP_PROT(vpage) & PROT_WRITE; 2729 if (page_pp_lock(pp, claim, 0)) { 2730 VPP_SETPPLOCK(vpage); 2731 } else { 2732 rctl_decr_locked_mem(p, NULL, 2733 PAGESIZE, 1); 2734 } 2735 } 2736 mutex_exit(&p->p_lock); 2737 } 2738 2739 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2740 hat_memload(hat, addr, pp, prot, hat_flag); 2741 2742 if (!(hat_flag & HAT_LOAD_LOCK)) 2743 page_unlock(pp); 2744 2745 anon_array_exit(&cookie); 2746 return (0); 2747 } 2748 } 2749 2750 /* 2751 * Obtain the page structure via anon_getpage() if it is 2752 * a private copy of an object (the result of a previous 2753 * copy-on-write). 2754 */ 2755 if (amp != NULL) { 2756 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2757 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2758 seg, addr, rw, svd->cred); 2759 if (err) 2760 goto out; 2761 2762 if (svd->type == MAP_SHARED) { 2763 /* 2764 * If this is a shared mapping to an 2765 * anon_map, then ignore the write 2766 * permissions returned by anon_getpage(). 2767 * They apply to the private mappings 2768 * of this anon_map. 2769 */ 2770 vpprot |= PROT_WRITE; 2771 } 2772 opp = anon_pl[0]; 2773 } 2774 } 2775 2776 /* 2777 * Search the pl[] list passed in if it is from the 2778 * original object (i.e., not a private copy). 2779 */ 2780 if (opp == NULL) { 2781 /* 2782 * Find original page. We must be bringing it in 2783 * from the list in pl[]. 2784 */ 2785 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2786 if (opp == PAGE_HANDLED) 2787 continue; 2788 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2789 if (opp->p_offset == off) 2790 break; 2791 } 2792 if (opp == NULL) { 2793 panic("segvn_faultpage not found"); 2794 /*NOTREACHED*/ 2795 } 2796 *ppp = PAGE_HANDLED; 2797 2798 } 2799 2800 ASSERT(PAGE_LOCKED(opp)); 2801 2802 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2803 "segvn_fault:pp %p vp %p offset %llx", 2804 opp, NULL, 0); 2805 2806 /* 2807 * The fault is treated as a copy-on-write fault if a 2808 * write occurs on a private segment and the object 2809 * page (i.e., mapping) is write protected. We assume 2810 * that fatal protection checks have already been made. 2811 */ 2812 2813 if (brkcow) { 2814 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2815 cow = !(vpprot & PROT_WRITE); 2816 } else if (svd->tr_state == SEGVN_TR_ON) { 2817 /* 2818 * If we are doing text replication COW on first touch. 2819 */ 2820 ASSERT(amp != NULL); 2821 ASSERT(svd->vp != NULL); 2822 ASSERT(rw != S_WRITE); 2823 cow = (ap == NULL); 2824 } else { 2825 cow = 0; 2826 } 2827 2828 /* 2829 * If not a copy-on-write case load the translation 2830 * and return. 
2831 */ 2832 if (cow == 0) { 2833 2834 /* 2835 * Handle pages that have been marked for migration 2836 */ 2837 if (lgrp_optimizations()) 2838 page_migrate(seg, addr, &opp, 1); 2839 2840 if (type == F_SOFTLOCK && svd->vp == NULL) { 2841 2842 ASSERT(opp->p_szc == 0 || 2843 (svd->type == MAP_SHARED && 2844 amp != NULL && amp->a_szc != 0)); 2845 2846 if (!segvn_pp_lock_anonpages(opp, first)) { 2847 page_unlock(opp); 2848 err = ENOMEM; 2849 goto out; 2850 } else { 2851 mutex_enter(&freemem_lock); 2852 svd->softlockcnt++; 2853 segvn_pages_locked++; 2854 mutex_exit(&freemem_lock); 2855 } 2856 } 2857 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2858 if (rw == S_WRITE) 2859 hat_setmod(opp); 2860 else if (rw != S_OTHER && !hat_ismod(opp)) 2861 prot &= ~PROT_WRITE; 2862 } 2863 2864 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2865 (!svd->pageprot && svd->prot == (prot & vpprot))); 2866 ASSERT(amp == NULL || 2867 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2868 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2869 svd->rcookie); 2870 2871 if (!(hat_flag & HAT_LOAD_LOCK)) 2872 page_unlock(opp); 2873 2874 if (anon_lock) { 2875 anon_array_exit(&cookie); 2876 } 2877 return (0); 2878 } 2879 2880 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2881 2882 hat_setref(opp); 2883 2884 ASSERT(amp != NULL && anon_lock); 2885 2886 /* 2887 * Steal the page only if it isn't a private page 2888 * since stealing a private page is not worth the effort. 2889 */ 2890 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2891 steal = 1; 2892 2893 /* 2894 * Steal the original page if the following conditions are true: 2895 * 2896 * We are low on memory, the page is not private, page is not large, 2897 * not shared, not modified, not `locked' or if we have it `locked' 2898 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2899 * that the page is not shared) and if it doesn't have any 2900 * translations. page_struct_lock isn't needed to look at p_cowcnt 2901 * and p_lckcnt because we first get exclusive lock on page. 2902 */ 2903 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2904 2905 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2906 page_tryupgrade(opp) && !hat_ismod(opp) && 2907 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2908 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2909 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2910 /* 2911 * Check if this page has other translations 2912 * after unloading our translation. 2913 */ 2914 if (hat_page_is_mapped(opp)) { 2915 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2916 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 2917 HAT_UNLOAD); 2918 } 2919 2920 /* 2921 * hat_unload() might sync back someone else's recent 2922 * modification, so check again. 2923 */ 2924 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 2925 pageflags |= STEAL_PAGE; 2926 } 2927 2928 /* 2929 * If we have a vpage pointer, see if it indicates that we have 2930 * ``locked'' the page we map -- if so, tell anon_private to 2931 * transfer the locking resource to the new page. 2932 * 2933 * See Statement at the beginning of segvn_lockop regarding 2934 * the way lockcnts/cowcnts are handled during COW. 2935 * 2936 */ 2937 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 2938 pageflags |= LOCK_PAGE; 2939 2940 /* 2941 * Allocate a private page and perform the copy. 2942 * For MAP_NORESERVE reserve swap space now, unless this 2943 * is a cow fault on an existing anon page in which case 2944 * MAP_NORESERVE will have made advance reservations. 
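 *
 * Accounting sketch (a restatement of the rule above, not extra logic):
 * a MAP_NORESERVE segment reserves swap one page at a time as anon pages
 * come into existence, so svd->swresv only ever grows in ptob(1) steps:
 *
 *        if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone) != 0)
 *                atomic_add_long(&svd->swresv, ptob(1));
 *        else
 *                the fault fails with ENOMEM
 *
 * When ap != NULL the anon page already exists, its page worth of swap
 * was reserved by the fault that created it, and nothing is reserved here.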
2945 */ 2946 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 2947 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 2948 atomic_add_long(&svd->swresv, ptob(1)); 2949 } else { 2950 page_unlock(opp); 2951 err = ENOMEM; 2952 goto out; 2953 } 2954 } 2955 oldap = ap; 2956 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 2957 if (pp == NULL) { 2958 err = ENOMEM; /* out of swap space */ 2959 goto out; 2960 } 2961 2962 /* 2963 * If we copied away from an anonymous page, then 2964 * we are one step closer to freeing up an anon slot. 2965 * 2966 * NOTE: The original anon slot must be released while 2967 * holding the "anon_map" lock. This is necessary to prevent 2968 * other threads from obtaining a pointer to the anon slot 2969 * which may be freed if its "refcnt" is 1. 2970 */ 2971 if (oldap != NULL) 2972 anon_decref(oldap); 2973 2974 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 2975 2976 /* 2977 * Handle pages that have been marked for migration 2978 */ 2979 if (lgrp_optimizations()) 2980 page_migrate(seg, addr, &pp, 1); 2981 2982 ASSERT(pp->p_szc == 0); 2983 if (type == F_SOFTLOCK && svd->vp == NULL) { 2984 if (!segvn_pp_lock_anonpages(pp, first)) { 2985 page_unlock(pp); 2986 err = ENOMEM; 2987 goto out; 2988 } else { 2989 mutex_enter(&freemem_lock); 2990 svd->softlockcnt++; 2991 segvn_pages_locked++; 2992 mutex_exit(&freemem_lock); 2993 } 2994 } 2995 2996 ASSERT(!IS_VMODSORT(pp->p_vnode)); 2997 if (enable_mbit_wa) { 2998 if (rw == S_WRITE) 2999 hat_setmod(pp); 3000 else if (!hat_ismod(pp)) 3001 prot &= ~PROT_WRITE; 3002 } 3003 3004 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3005 hat_memload(hat, addr, pp, prot, hat_flag); 3006 3007 if (!(hat_flag & HAT_LOAD_LOCK)) 3008 page_unlock(pp); 3009 3010 ASSERT(anon_lock); 3011 anon_array_exit(&cookie); 3012 return (0); 3013 out: 3014 if (anon_lock) 3015 anon_array_exit(&cookie); 3016 3017 if (type == F_SOFTLOCK && svd->vp != NULL) { 3018 mutex_enter(&freemem_lock); 3019 availrmem++; 3020 segvn_pages_locked--; 3021 svd->softlockcnt--; 3022 mutex_exit(&freemem_lock); 3023 } 3024 return (FC_MAKE_ERR(err)); 3025 } 3026 3027 /* 3028 * relocate a bunch of smaller targ pages into one large repl page. all targ 3029 * pages must be complete pages smaller than replacement pages. 3030 * it's assumed that no page's szc can change since they are all PAGESIZE or 3031 * complete large pages locked SHARED. 
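 *
 * Worked example (the counts are purely illustrative): if
 * page_get_pagecnt(repl_szc) == 8, then targ[] describes 8 constituent
 * slots, filled either by 8 PAGESIZE pages or by complete smaller-szc
 * groups whose page counts add up to 8, all locked SE_EXCL. Each targ
 * root is page_relocate()d onto the matching constituents of the
 * replacement page, and on return targ[0..7] point at the 8 constituents
 * of the now-populated replacement page, downgraded to SE_SHARED.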
3032 */ 3033 static void 3034 segvn_relocate_pages(page_t **targ, page_t *replacement) 3035 { 3036 page_t *pp; 3037 pgcnt_t repl_npgs, curnpgs; 3038 pgcnt_t i; 3039 uint_t repl_szc = replacement->p_szc; 3040 page_t *first_repl = replacement; 3041 page_t *repl; 3042 spgcnt_t npgs; 3043 3044 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3045 3046 ASSERT(repl_szc != 0); 3047 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3048 3049 i = 0; 3050 while (repl_npgs) { 3051 spgcnt_t nreloc; 3052 int err; 3053 ASSERT(replacement != NULL); 3054 pp = targ[i]; 3055 ASSERT(pp->p_szc < repl_szc); 3056 ASSERT(PAGE_EXCL(pp)); 3057 ASSERT(!PP_ISFREE(pp)); 3058 curnpgs = page_get_pagecnt(pp->p_szc); 3059 if (curnpgs == 1) { 3060 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3061 repl = replacement; 3062 page_sub(&replacement, repl); 3063 ASSERT(PAGE_EXCL(repl)); 3064 ASSERT(!PP_ISFREE(repl)); 3065 ASSERT(repl->p_szc == repl_szc); 3066 } else { 3067 page_t *repl_savepp; 3068 int j; 3069 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3070 repl_savepp = replacement; 3071 for (j = 0; j < curnpgs; j++) { 3072 repl = replacement; 3073 page_sub(&replacement, repl); 3074 ASSERT(PAGE_EXCL(repl)); 3075 ASSERT(!PP_ISFREE(repl)); 3076 ASSERT(repl->p_szc == repl_szc); 3077 ASSERT(page_pptonum(targ[i + j]) == 3078 page_pptonum(targ[i]) + j); 3079 } 3080 repl = repl_savepp; 3081 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3082 } 3083 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3084 if (err || nreloc != curnpgs) { 3085 panic("segvn_relocate_pages: " 3086 "page_relocate failed err=%d curnpgs=%ld " 3087 "nreloc=%ld", err, curnpgs, nreloc); 3088 } 3089 ASSERT(curnpgs <= repl_npgs); 3090 repl_npgs -= curnpgs; 3091 i += curnpgs; 3092 } 3093 ASSERT(replacement == NULL); 3094 3095 repl = first_repl; 3096 repl_npgs = npgs; 3097 for (i = 0; i < repl_npgs; i++) { 3098 ASSERT(PAGE_EXCL(repl)); 3099 ASSERT(!PP_ISFREE(repl)); 3100 targ[i] = repl; 3101 page_downgrade(targ[i]); 3102 repl++; 3103 } 3104 } 3105 3106 /* 3107 * Check if all pages in ppa array are complete smaller than szc pages and 3108 * their roots will still be aligned relative to their current size if the 3109 * entire ppa array is relocated into one szc page. If these conditions are 3110 * not met return 0. 3111 * 3112 * If all pages are properly aligned attempt to upgrade their locks 3113 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3114 * upgrdfail was set to 0 by caller. 3115 * 3116 * Return 1 if all pages are aligned and locked exclusively. 3117 * 3118 * If all pages in ppa array happen to be physically contiguous to make one 3119 * szc page and all exclusive locks are successfully obtained promote the page 3120 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
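 *
 * Summary of the result cases described above (restated for quick
 * reference, no new behavior implied):
 *
 *        returns 1, *pszc untouched  - pages aligned and now locked SE_EXCL;
 *                                      caller may relocate them into one
 *                                      szc page
 *        returns 1, *pszc == szc     - pages were already physically
 *                                      contiguous; promoted in place and
 *                                      left locked SE_SHARED
 *        returns 0, *upgrdfail == 1  - a page_tryupgrade() failed; *pszc
 *                                      holds the szc of the page that could
 *                                      not be upgraded
 *        returns 0, *upgrdfail == 0  - the alignment/size checks failed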
3121 */
3122 static int
3123 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3124 {
3125 page_t *pp;
3126 pfn_t pfn;
3127 pgcnt_t totnpgs = page_get_pagecnt(szc);
3128 pfn_t first_pfn;
3129 int contig = 1;
3130 pgcnt_t i;
3131 pgcnt_t j;
3132 uint_t curszc;
3133 pgcnt_t curnpgs;
3134 int root = 0;
3135
3136 ASSERT(szc > 0);
3137
3138 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3139
3140 for (i = 0; i < totnpgs; i++) {
3141 pp = ppa[i];
3142 ASSERT(PAGE_SHARED(pp));
3143 ASSERT(!PP_ISFREE(pp));
3144 pfn = page_pptonum(pp);
3145 if (i == 0) {
3146 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3147 contig = 0;
3148 } else {
3149 first_pfn = pfn;
3150 }
3151 } else if (contig && pfn != first_pfn + i) {
3152 contig = 0;
3153 }
3154 if (pp->p_szc == 0) {
3155 if (root) {
3156 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3157 return (0);
3158 }
3159 } else if (!root) {
3160 if ((curszc = pp->p_szc) >= szc) {
3161 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3162 return (0);
3163 }
3164 if (curszc == 0) {
3165 /*
3166 * p_szc changed means we don't have all pages
3167 * locked. return failure.
3168 */
3169 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3170 return (0);
3171 }
3172 curnpgs = page_get_pagecnt(curszc);
3173 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3174 !IS_P2ALIGNED(i, curnpgs)) {
3175 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3176 return (0);
3177 }
3178 root = 1;
3179 } else {
3180 ASSERT(i > 0);
3181 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3182 if (pp->p_szc != curszc) {
3183 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3184 return (0);
3185 }
3186 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3187 panic("segvn_full_szcpages: "
3188 "large page not physically contiguous");
3189 }
3190 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3191 root = 0;
3192 }
3193 }
3194 }
3195
3196 for (i = 0; i < totnpgs; i++) {
3197 ASSERT(ppa[i]->p_szc < szc);
3198 if (!page_tryupgrade(ppa[i])) {
3199 for (j = 0; j < i; j++) {
3200 page_downgrade(ppa[j]);
3201 }
3202 *pszc = ppa[i]->p_szc;
3203 *upgrdfail = 1;
3204 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3205 return (0);
3206 }
3207 }
3208
3209 /*
3210 * When a page is put on a free cachelist its szc is set to 0. if file
3211 * system reclaimed pages from cachelist targ pages will be physically
3212 * contiguous with 0 p_szc. in this case just upgrade szc of targ
3213 * pages without any relocations.
3214 * To avoid any hat issues with previous small mappings
3215 * hat_pageunload() the target pages first.
3216 */
3217 if (contig) {
3218 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3219 for (i = 0; i < totnpgs; i++) {
3220 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3221 }
3222 for (i = 0; i < totnpgs; i++) {
3223 ppa[i]->p_szc = szc;
3224 }
3225 for (i = 0; i < totnpgs; i++) {
3226 ASSERT(PAGE_EXCL(ppa[i]));
3227 page_downgrade(ppa[i]);
3228 }
3229 if (pszc != NULL) {
3230 *pszc = szc;
3231 }
3232 }
3233 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3234 return (1);
3235 }
3236
3237 /*
3238 * Create physically contiguous pages for [vp, off] - [vp, off +
3239 * page_size(szc)) range and for private segment return them in ppa array.
3240 * Pages are created either via IO or relocations.
3241 *
3242 * Return 1 on success and 0 on failure.
3243 *
3244 * If physically contiguous pages already exist for this range return 1 without
3245 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3246 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
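 *
 * Caller-side pattern, as used by segvn_fault_vnodepages() later in this
 * file (sketch only):
 *
 *        ppa[0] = NULL;
 *        physcontig = segvn_fill_vp_pages(svd, vp, off, szc, ppa,
 *            &pplist, &pszc, &downsize);
 *        if (physcontig && ppa[0] == NULL)
 *                physcontig = 0;
 *
 * where a zero physcontig (or an untouched ppa[0]) sends the caller to
 * VOP_GETPAGE() to fill ppa instead.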
3247 */ 3248 3249 static int 3250 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3251 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3252 int *downsize) 3253 3254 { 3255 page_t *pplist = *ppplist; 3256 size_t pgsz = page_get_pagesize(szc); 3257 pgcnt_t pages = btop(pgsz); 3258 ulong_t start_off = off; 3259 u_offset_t eoff = off + pgsz; 3260 spgcnt_t nreloc; 3261 u_offset_t io_off = off; 3262 size_t io_len; 3263 page_t *io_pplist = NULL; 3264 page_t *done_pplist = NULL; 3265 pgcnt_t pgidx = 0; 3266 page_t *pp; 3267 page_t *newpp; 3268 page_t *targpp; 3269 int io_err = 0; 3270 int i; 3271 pfn_t pfn; 3272 ulong_t ppages; 3273 page_t *targ_pplist = NULL; 3274 page_t *repl_pplist = NULL; 3275 page_t *tmp_pplist; 3276 int nios = 0; 3277 uint_t pszc; 3278 struct vattr va; 3279 3280 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3281 3282 ASSERT(szc != 0); 3283 ASSERT(pplist->p_szc == szc); 3284 3285 /* 3286 * downsize will be set to 1 only if we fail to lock pages. this will 3287 * allow subsequent faults to try to relocate the page again. If we 3288 * fail due to misalignment don't downsize and let the caller map the 3289 * whole region with small mappings to avoid more faults into the area 3290 * where we can't get large pages anyway. 3291 */ 3292 *downsize = 0; 3293 3294 while (off < eoff) { 3295 newpp = pplist; 3296 ASSERT(newpp != NULL); 3297 ASSERT(PAGE_EXCL(newpp)); 3298 ASSERT(!PP_ISFREE(newpp)); 3299 /* 3300 * we pass NULL for nrelocp to page_lookup_create() 3301 * so that it doesn't relocate. We relocate here 3302 * later only after we make sure we can lock all 3303 * pages in the range we handle and they are all 3304 * aligned. 3305 */ 3306 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3307 ASSERT(pp != NULL); 3308 ASSERT(!PP_ISFREE(pp)); 3309 ASSERT(pp->p_vnode == vp); 3310 ASSERT(pp->p_offset == off); 3311 if (pp == newpp) { 3312 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3313 page_sub(&pplist, pp); 3314 ASSERT(PAGE_EXCL(pp)); 3315 ASSERT(page_iolock_assert(pp)); 3316 page_list_concat(&io_pplist, &pp); 3317 off += PAGESIZE; 3318 continue; 3319 } 3320 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3321 pfn = page_pptonum(pp); 3322 pszc = pp->p_szc; 3323 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3324 IS_P2ALIGNED(pfn, pages)) { 3325 ASSERT(repl_pplist == NULL); 3326 ASSERT(done_pplist == NULL); 3327 ASSERT(pplist == *ppplist); 3328 page_unlock(pp); 3329 page_free_replacement_page(pplist); 3330 page_create_putback(pages); 3331 *ppplist = NULL; 3332 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3333 return (1); 3334 } 3335 if (pszc >= szc) { 3336 page_unlock(pp); 3337 segvn_faultvnmpss_align_err1++; 3338 goto out; 3339 } 3340 ppages = page_get_pagecnt(pszc); 3341 if (!IS_P2ALIGNED(pfn, ppages)) { 3342 ASSERT(pszc > 0); 3343 /* 3344 * sizing down to pszc won't help. 3345 */ 3346 page_unlock(pp); 3347 segvn_faultvnmpss_align_err2++; 3348 goto out; 3349 } 3350 pfn = page_pptonum(newpp); 3351 if (!IS_P2ALIGNED(pfn, ppages)) { 3352 ASSERT(pszc > 0); 3353 /* 3354 * sizing down to pszc won't help. 
3355 */
3356 page_unlock(pp);
3357 segvn_faultvnmpss_align_err3++;
3358 goto out;
3359 }
3360 if (!PAGE_EXCL(pp)) {
3361 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3362 page_unlock(pp);
3363 *downsize = 1;
3364 *ret_pszc = pp->p_szc;
3365 goto out;
3366 }
3367 targpp = pp;
3368 if (io_pplist != NULL) {
3369 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3370 io_len = off - io_off;
3371 /*
3372 * Some file systems like NFS don't check EOF
3373 * conditions in VOP_PAGEIO(). Check it here
3374 * now that pages are locked SE_EXCL. Any file
3375 * truncation will wait until the pages are
3376 * unlocked so no need to worry that file will
3377 * be truncated after we check its size here.
3378 * XXX fix NFS to remove this check.
3379 */
3380 va.va_mask = AT_SIZE;
3381 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred) != 0) {
3382 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3383 page_unlock(targpp);
3384 goto out;
3385 }
3386 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3387 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3388 *downsize = 1;
3389 *ret_pszc = 0;
3390 page_unlock(targpp);
3391 goto out;
3392 }
3393 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3394 B_READ, svd->cred);
3395 if (io_err) {
3396 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3397 page_unlock(targpp);
3398 if (io_err == EDEADLK) {
3399 segvn_vmpss_pageio_deadlk_err++;
3400 }
3401 goto out;
3402 }
3403 nios++;
3404 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3405 while (io_pplist != NULL) {
3406 pp = io_pplist;
3407 page_sub(&io_pplist, pp);
3408 ASSERT(page_iolock_assert(pp));
3409 page_io_unlock(pp);
3410 pgidx = (pp->p_offset - start_off) >>
3411 PAGESHIFT;
3412 ASSERT(pgidx < pages);
3413 ppa[pgidx] = pp;
3414 page_list_concat(&done_pplist, &pp);
3415 }
3416 }
3417 pp = targpp;
3418 ASSERT(PAGE_EXCL(pp));
3419 ASSERT(pp->p_szc <= pszc);
3420 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3421 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3422 page_unlock(pp);
3423 *downsize = 1;
3424 *ret_pszc = pp->p_szc;
3425 goto out;
3426 }
3427 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3428 /*
3429 * page szc could have changed before the entire group was
3430 * locked. reread page szc.
3431 */ 3432 pszc = pp->p_szc; 3433 ppages = page_get_pagecnt(pszc); 3434 3435 /* link just the roots */ 3436 page_list_concat(&targ_pplist, &pp); 3437 page_sub(&pplist, newpp); 3438 page_list_concat(&repl_pplist, &newpp); 3439 off += PAGESIZE; 3440 while (--ppages != 0) { 3441 newpp = pplist; 3442 page_sub(&pplist, newpp); 3443 off += PAGESIZE; 3444 } 3445 io_off = off; 3446 } 3447 if (io_pplist != NULL) { 3448 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3449 io_len = eoff - io_off; 3450 va.va_mask = AT_SIZE; 3451 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred) != 0) { 3452 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3453 goto out; 3454 } 3455 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3456 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3457 *downsize = 1; 3458 *ret_pszc = 0; 3459 goto out; 3460 } 3461 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3462 B_READ, svd->cred); 3463 if (io_err) { 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3465 if (io_err == EDEADLK) { 3466 segvn_vmpss_pageio_deadlk_err++; 3467 } 3468 goto out; 3469 } 3470 nios++; 3471 while (io_pplist != NULL) { 3472 pp = io_pplist; 3473 page_sub(&io_pplist, pp); 3474 ASSERT(page_iolock_assert(pp)); 3475 page_io_unlock(pp); 3476 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3477 ASSERT(pgidx < pages); 3478 ppa[pgidx] = pp; 3479 } 3480 } 3481 /* 3482 * we're now bound to succeed or panic. 3483 * remove pages from done_pplist. it's not needed anymore. 3484 */ 3485 while (done_pplist != NULL) { 3486 pp = done_pplist; 3487 page_sub(&done_pplist, pp); 3488 } 3489 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3490 ASSERT(pplist == NULL); 3491 *ppplist = NULL; 3492 while (targ_pplist != NULL) { 3493 int ret; 3494 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3495 ASSERT(repl_pplist); 3496 pp = targ_pplist; 3497 page_sub(&targ_pplist, pp); 3498 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3499 newpp = repl_pplist; 3500 page_sub(&repl_pplist, newpp); 3501 #ifdef DEBUG 3502 pfn = page_pptonum(pp); 3503 pszc = pp->p_szc; 3504 ppages = page_get_pagecnt(pszc); 3505 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3506 pfn = page_pptonum(newpp); 3507 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3508 ASSERT(P2PHASE(pfn, pages) == pgidx); 3509 #endif 3510 nreloc = 0; 3511 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3512 if (ret != 0 || nreloc == 0) { 3513 panic("segvn_fill_vp_pages: " 3514 "page_relocate failed"); 3515 } 3516 pp = newpp; 3517 while (nreloc-- != 0) { 3518 ASSERT(PAGE_EXCL(pp)); 3519 ASSERT(pp->p_vnode == vp); 3520 ASSERT(pgidx == 3521 ((pp->p_offset - start_off) >> PAGESHIFT)); 3522 ppa[pgidx++] = pp; 3523 pp++; 3524 } 3525 } 3526 3527 if (svd->type == MAP_PRIVATE) { 3528 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3529 for (i = 0; i < pages; i++) { 3530 ASSERT(ppa[i] != NULL); 3531 ASSERT(PAGE_EXCL(ppa[i])); 3532 ASSERT(ppa[i]->p_vnode == vp); 3533 ASSERT(ppa[i]->p_offset == 3534 start_off + (i << PAGESHIFT)); 3535 page_downgrade(ppa[i]); 3536 } 3537 ppa[pages] = NULL; 3538 } else { 3539 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3540 /* 3541 * the caller will still call VOP_GETPAGE() for shared segments 3542 * to check FS write permissions. For private segments we map 3543 * file read only anyway. so no VOP_GETPAGE is needed. 
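 *
 * Hence the two termination conventions in the success path: for
 * MAP_PRIVATE the array is handed back fully filled and NULL terminated
 * (ppa[pages] == NULL) with every page still locked SE_SHARED, while for
 * MAP_SHARED the pages are unlocked and ppa[0] is reset to NULL so the
 * caller goes through VOP_GETPAGE() anyway.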
3544 */ 3545 for (i = 0; i < pages; i++) { 3546 ASSERT(ppa[i] != NULL); 3547 ASSERT(PAGE_EXCL(ppa[i])); 3548 ASSERT(ppa[i]->p_vnode == vp); 3549 ASSERT(ppa[i]->p_offset == 3550 start_off + (i << PAGESHIFT)); 3551 page_unlock(ppa[i]); 3552 } 3553 ppa[0] = NULL; 3554 } 3555 3556 return (1); 3557 out: 3558 /* 3559 * Do the cleanup. Unlock target pages we didn't relocate. They are 3560 * linked on targ_pplist by root pages. reassemble unused replacement 3561 * and io pages back to pplist. 3562 */ 3563 if (io_pplist != NULL) { 3564 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3565 pp = io_pplist; 3566 do { 3567 ASSERT(pp->p_vnode == vp); 3568 ASSERT(pp->p_offset == io_off); 3569 ASSERT(page_iolock_assert(pp)); 3570 page_io_unlock(pp); 3571 page_hashout(pp, NULL); 3572 io_off += PAGESIZE; 3573 } while ((pp = pp->p_next) != io_pplist); 3574 page_list_concat(&io_pplist, &pplist); 3575 pplist = io_pplist; 3576 } 3577 tmp_pplist = NULL; 3578 while (targ_pplist != NULL) { 3579 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3580 pp = targ_pplist; 3581 ASSERT(PAGE_EXCL(pp)); 3582 page_sub(&targ_pplist, pp); 3583 3584 pszc = pp->p_szc; 3585 ppages = page_get_pagecnt(pszc); 3586 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3587 3588 if (pszc != 0) { 3589 group_page_unlock(pp); 3590 } 3591 page_unlock(pp); 3592 3593 pp = repl_pplist; 3594 ASSERT(pp != NULL); 3595 ASSERT(PAGE_EXCL(pp)); 3596 ASSERT(pp->p_szc == szc); 3597 page_sub(&repl_pplist, pp); 3598 3599 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3600 3601 /* relink replacement page */ 3602 page_list_concat(&tmp_pplist, &pp); 3603 while (--ppages != 0) { 3604 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3605 pp++; 3606 ASSERT(PAGE_EXCL(pp)); 3607 ASSERT(pp->p_szc == szc); 3608 page_list_concat(&tmp_pplist, &pp); 3609 } 3610 } 3611 if (tmp_pplist != NULL) { 3612 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3613 page_list_concat(&tmp_pplist, &pplist); 3614 pplist = tmp_pplist; 3615 } 3616 /* 3617 * at this point all pages are either on done_pplist or 3618 * pplist. They can't be all on done_pplist otherwise 3619 * we'd've been done. 3620 */ 3621 ASSERT(pplist != NULL); 3622 if (nios != 0) { 3623 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3624 pp = pplist; 3625 do { 3626 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3627 ASSERT(pp->p_szc == szc); 3628 ASSERT(PAGE_EXCL(pp)); 3629 ASSERT(pp->p_vnode != vp); 3630 pp->p_szc = 0; 3631 } while ((pp = pp->p_next) != pplist); 3632 3633 pp = done_pplist; 3634 do { 3635 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3636 ASSERT(pp->p_szc == szc); 3637 ASSERT(PAGE_EXCL(pp)); 3638 ASSERT(pp->p_vnode == vp); 3639 pp->p_szc = 0; 3640 } while ((pp = pp->p_next) != done_pplist); 3641 3642 while (pplist != NULL) { 3643 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3644 pp = pplist; 3645 page_sub(&pplist, pp); 3646 page_free(pp, 0); 3647 } 3648 3649 while (done_pplist != NULL) { 3650 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3651 pp = done_pplist; 3652 page_sub(&done_pplist, pp); 3653 page_unlock(pp); 3654 } 3655 *ppplist = NULL; 3656 return (0); 3657 } 3658 ASSERT(pplist == *ppplist); 3659 if (io_err) { 3660 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3661 /* 3662 * don't downsize on io error. 3663 * see if vop_getpage succeeds. 3664 * pplist may still be used in this case 3665 * for relocations. 
3666 */ 3667 return (0); 3668 } 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3670 page_free_replacement_page(pplist); 3671 page_create_putback(pages); 3672 *ppplist = NULL; 3673 return (0); 3674 } 3675 3676 int segvn_anypgsz = 0; 3677 3678 #define SEGVN_RESTORE_SOFTLOCK(type, pages) \ 3679 if ((type) == F_SOFTLOCK) { \ 3680 mutex_enter(&freemem_lock); \ 3681 availrmem += (pages); \ 3682 segvn_pages_locked -= (pages); \ 3683 svd->softlockcnt -= (pages); \ 3684 mutex_exit(&freemem_lock); \ 3685 } 3686 3687 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3688 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3689 if ((rw) == S_WRITE) { \ 3690 for (i = 0; i < (pages); i++) { \ 3691 ASSERT((ppa)[i]->p_vnode == \ 3692 (ppa)[0]->p_vnode); \ 3693 hat_setmod((ppa)[i]); \ 3694 } \ 3695 } else if ((rw) != S_OTHER && \ 3696 ((prot) & (vpprot) & PROT_WRITE)) { \ 3697 for (i = 0; i < (pages); i++) { \ 3698 ASSERT((ppa)[i]->p_vnode == \ 3699 (ppa)[0]->p_vnode); \ 3700 if (!hat_ismod((ppa)[i])) { \ 3701 prot &= ~PROT_WRITE; \ 3702 break; \ 3703 } \ 3704 } \ 3705 } \ 3706 } 3707 3708 #ifdef VM_STATS 3709 3710 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3711 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3712 3713 #else /* VM_STATS */ 3714 3715 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3716 3717 #endif 3718 3719 static faultcode_t 3720 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3721 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3722 caddr_t eaddr, int brkcow) 3723 { 3724 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3725 struct anon_map *amp = svd->amp; 3726 uchar_t segtype = svd->type; 3727 uint_t szc = seg->s_szc; 3728 size_t pgsz = page_get_pagesize(szc); 3729 size_t maxpgsz = pgsz; 3730 pgcnt_t pages = btop(pgsz); 3731 pgcnt_t maxpages = pages; 3732 size_t ppasize = (pages + 1) * sizeof (page_t *); 3733 caddr_t a = lpgaddr; 3734 caddr_t maxlpgeaddr = lpgeaddr; 3735 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3736 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3737 struct vpage *vpage = (svd->vpage != NULL) ? 3738 &svd->vpage[seg_page(seg, a)] : NULL; 3739 vnode_t *vp = svd->vp; 3740 page_t **ppa; 3741 uint_t pszc; 3742 size_t ppgsz; 3743 pgcnt_t ppages; 3744 faultcode_t err = 0; 3745 int ierr; 3746 int vop_size_err = 0; 3747 uint_t protchk, prot, vpprot; 3748 ulong_t i; 3749 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3750 anon_sync_obj_t an_cookie; 3751 enum seg_rw arw; 3752 int alloc_failed = 0; 3753 int adjszc_chk; 3754 struct vattr va; 3755 int xhat = 0; 3756 page_t *pplist; 3757 pfn_t pfn; 3758 int physcontig; 3759 int upgrdfail; 3760 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3761 int tron = (svd->tr_state == SEGVN_TR_ON); 3762 3763 ASSERT(szc != 0); 3764 ASSERT(vp != NULL); 3765 ASSERT(brkcow == 0 || amp != NULL); 3766 ASSERT(tron == 0 || amp != NULL); 3767 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3768 ASSERT(!(svd->flags & MAP_NORESERVE)); 3769 ASSERT(type != F_SOFTUNLOCK); 3770 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3771 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3772 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3773 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3774 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3775 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3776 3777 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3778 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3779 3780 if (svd->flags & MAP_TEXT) { 3781 hat_flag |= HAT_LOAD_TEXT; 3782 } 3783 3784 if (svd->pageprot) { 3785 switch (rw) { 3786 case S_READ: 3787 protchk = PROT_READ; 3788 break; 3789 case S_WRITE: 3790 protchk = PROT_WRITE; 3791 break; 3792 case S_EXEC: 3793 protchk = PROT_EXEC; 3794 break; 3795 case S_OTHER: 3796 default: 3797 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3798 break; 3799 } 3800 } else { 3801 prot = svd->prot; 3802 /* caller has already done segment level protection check. */ 3803 } 3804 3805 if (seg->s_as->a_hat != hat) { 3806 xhat = 1; 3807 } 3808 3809 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3810 SEGVN_VMSTAT_FLTVNPAGES(2); 3811 arw = S_READ; 3812 } else { 3813 arw = rw; 3814 } 3815 3816 ppa = kmem_alloc(ppasize, KM_SLEEP); 3817 3818 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3819 3820 for (;;) { 3821 adjszc_chk = 0; 3822 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3823 if (adjszc_chk) { 3824 while (szc < seg->s_szc) { 3825 uintptr_t e; 3826 uint_t tszc; 3827 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3828 seg->s_szc; 3829 ppgsz = page_get_pagesize(tszc); 3830 if (!IS_P2ALIGNED(a, ppgsz) || 3831 ((alloc_failed >> tszc) & 3832 0x1)) { 3833 break; 3834 } 3835 SEGVN_VMSTAT_FLTVNPAGES(4); 3836 szc = tszc; 3837 pgsz = ppgsz; 3838 pages = btop(pgsz); 3839 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3840 lpgeaddr = (caddr_t)e; 3841 } 3842 } 3843 3844 again: 3845 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3846 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3847 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 3848 anon_array_enter(amp, aindx, &an_cookie); 3849 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3850 SEGVN_VMSTAT_FLTVNPAGES(5); 3851 ASSERT(anon_pages(amp->ahp, aindx, 3852 maxpages) == maxpages); 3853 anon_array_exit(&an_cookie); 3854 ANON_LOCK_EXIT(&->a_rwlock); 3855 err = segvn_fault_anonpages(hat, seg, 3856 a, a + maxpgsz, type, rw, 3857 MAX(a, addr), 3858 MIN(a + maxpgsz, eaddr), brkcow); 3859 if (err != 0) { 3860 SEGVN_VMSTAT_FLTVNPAGES(6); 3861 goto out; 3862 } 3863 if (szc < seg->s_szc) { 3864 szc = seg->s_szc; 3865 pgsz = maxpgsz; 3866 pages = maxpages; 3867 lpgeaddr = maxlpgeaddr; 3868 } 3869 goto next; 3870 } else { 3871 ASSERT(anon_pages(amp->ahp, aindx, 3872 maxpages) == 0); 3873 SEGVN_VMSTAT_FLTVNPAGES(7); 3874 anon_array_exit(&an_cookie); 3875 ANON_LOCK_EXIT(&->a_rwlock); 3876 } 3877 } 3878 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3879 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3880 3881 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3882 ASSERT(vpage != NULL); 3883 prot = VPP_PROT(vpage); 3884 ASSERT(sameprot(seg, a, maxpgsz)); 3885 if ((prot & protchk) == 0) { 3886 SEGVN_VMSTAT_FLTVNPAGES(8); 3887 err = FC_PROT; 3888 goto out; 3889 } 3890 } 3891 if (type == F_SOFTLOCK) { 3892 mutex_enter(&freemem_lock); 3893 if (availrmem < tune.t_minarmem + pages) { 3894 mutex_exit(&freemem_lock); 3895 err = FC_MAKE_ERR(ENOMEM); 3896 goto out; 3897 } else { 3898 availrmem -= pages; 3899 segvn_pages_locked += pages; 3900 svd->softlockcnt += pages; 3901 } 3902 mutex_exit(&freemem_lock); 3903 } 3904 3905 pplist = NULL; 3906 physcontig = 0; 3907 ppa[0] = NULL; 3908 if (!brkcow && !tron && szc && 3909 !page_exists_physcontig(vp, off, szc, 3910 segtype == MAP_PRIVATE ? 
ppa : NULL)) { 3911 SEGVN_VMSTAT_FLTVNPAGES(9); 3912 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3913 szc, 0, 0) && type != F_SOFTLOCK) { 3914 SEGVN_VMSTAT_FLTVNPAGES(10); 3915 pszc = 0; 3916 ierr = -1; 3917 alloc_failed |= (1 << szc); 3918 break; 3919 } 3920 if (pplist != NULL && 3921 vp->v_mpssdata == SEGVN_PAGEIO) { 3922 int downsize; 3923 SEGVN_VMSTAT_FLTVNPAGES(11); 3924 physcontig = segvn_fill_vp_pages(svd, 3925 vp, off, szc, ppa, &pplist, 3926 &pszc, &downsize); 3927 ASSERT(!physcontig || pplist == NULL); 3928 if (!physcontig && downsize && 3929 type != F_SOFTLOCK) { 3930 ASSERT(pplist == NULL); 3931 SEGVN_VMSTAT_FLTVNPAGES(12); 3932 ierr = -1; 3933 break; 3934 } 3935 ASSERT(!physcontig || 3936 segtype == MAP_PRIVATE || 3937 ppa[0] == NULL); 3938 if (physcontig && ppa[0] == NULL) { 3939 physcontig = 0; 3940 } 3941 } 3942 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 3943 SEGVN_VMSTAT_FLTVNPAGES(13); 3944 ASSERT(segtype == MAP_PRIVATE); 3945 physcontig = 1; 3946 } 3947 3948 if (!physcontig) { 3949 SEGVN_VMSTAT_FLTVNPAGES(14); 3950 ppa[0] = NULL; 3951 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 3952 &vpprot, ppa, pgsz, seg, a, arw, 3953 svd->cred); 3954 #ifdef DEBUG 3955 if (ierr == 0) { 3956 for (i = 0; i < pages; i++) { 3957 ASSERT(PAGE_LOCKED(ppa[i])); 3958 ASSERT(!PP_ISFREE(ppa[i])); 3959 ASSERT(ppa[i]->p_vnode == vp); 3960 ASSERT(ppa[i]->p_offset == 3961 off + (i << PAGESHIFT)); 3962 } 3963 } 3964 #endif /* DEBUG */ 3965 if (segtype == MAP_PRIVATE) { 3966 SEGVN_VMSTAT_FLTVNPAGES(15); 3967 vpprot &= ~PROT_WRITE; 3968 } 3969 } else { 3970 ASSERT(segtype == MAP_PRIVATE); 3971 SEGVN_VMSTAT_FLTVNPAGES(16); 3972 vpprot = PROT_ALL & ~PROT_WRITE; 3973 ierr = 0; 3974 } 3975 3976 if (ierr != 0) { 3977 SEGVN_VMSTAT_FLTVNPAGES(17); 3978 if (pplist != NULL) { 3979 SEGVN_VMSTAT_FLTVNPAGES(18); 3980 page_free_replacement_page(pplist); 3981 page_create_putback(pages); 3982 } 3983 SEGVN_RESTORE_SOFTLOCK(type, pages); 3984 if (a + pgsz <= eaddr) { 3985 SEGVN_VMSTAT_FLTVNPAGES(19); 3986 err = FC_MAKE_ERR(ierr); 3987 goto out; 3988 } 3989 va.va_mask = AT_SIZE; 3990 if (VOP_GETATTR(vp, &va, 0, svd->cred) != 0) { 3991 SEGVN_VMSTAT_FLTVNPAGES(20); 3992 err = FC_MAKE_ERR(EIO); 3993 goto out; 3994 } 3995 if (btopr(va.va_size) >= btopr(off + pgsz)) { 3996 SEGVN_VMSTAT_FLTVNPAGES(21); 3997 err = FC_MAKE_ERR(ierr); 3998 goto out; 3999 } 4000 if (btopr(va.va_size) < 4001 btopr(off + (eaddr - a))) { 4002 SEGVN_VMSTAT_FLTVNPAGES(22); 4003 err = FC_MAKE_ERR(ierr); 4004 goto out; 4005 } 4006 if (brkcow || tron || type == F_SOFTLOCK) { 4007 /* can't reduce map area */ 4008 SEGVN_VMSTAT_FLTVNPAGES(23); 4009 vop_size_err = 1; 4010 goto out; 4011 } 4012 SEGVN_VMSTAT_FLTVNPAGES(24); 4013 ASSERT(szc != 0); 4014 pszc = 0; 4015 ierr = -1; 4016 break; 4017 } 4018 4019 if (amp != NULL) { 4020 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4021 anon_array_enter(amp, aindx, &an_cookie); 4022 } 4023 if (amp != NULL && 4024 anon_get_ptr(amp->ahp, aindx) != NULL) { 4025 ulong_t taindx = P2ALIGN(aindx, maxpages); 4026 4027 SEGVN_VMSTAT_FLTVNPAGES(25); 4028 ASSERT(anon_pages(amp->ahp, taindx, 4029 maxpages) == maxpages); 4030 for (i = 0; i < pages; i++) { 4031 page_unlock(ppa[i]); 4032 } 4033 anon_array_exit(&an_cookie); 4034 ANON_LOCK_EXIT(&->a_rwlock); 4035 if (pplist != NULL) { 4036 page_free_replacement_page(pplist); 4037 page_create_putback(pages); 4038 } 4039 SEGVN_RESTORE_SOFTLOCK(type, pages); 4040 if (szc < seg->s_szc) { 4041 SEGVN_VMSTAT_FLTVNPAGES(26); 4042 /* 4043 * For private segments SOFTLOCK 4044 * either 
always breaks cow (any rw 4045 * type except S_READ_NOCOW) or 4046 * address space is locked as writer 4047 * (S_READ_NOCOW case) and anon slots 4048 * can't show up on second check. 4049 * Therefore if we are here for 4050 * SOFTLOCK case it must be a cow 4051 * break but cow break never reduces 4052 * szc. Text replication (tron) in 4053 * this case works as a cow break. 4054 * Thus the assert below. 4055 */ 4056 ASSERT(!brkcow && !tron && 4057 type != F_SOFTLOCK); 4058 pszc = seg->s_szc; 4059 ierr = -2; 4060 break; 4061 } 4062 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4063 goto again; 4064 } 4065 #ifdef DEBUG 4066 if (amp != NULL) { 4067 ulong_t taindx = P2ALIGN(aindx, maxpages); 4068 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4069 } 4070 #endif /* DEBUG */ 4071 4072 if (brkcow || tron) { 4073 ASSERT(amp != NULL); 4074 ASSERT(pplist == NULL); 4075 ASSERT(szc == seg->s_szc); 4076 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4077 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4078 SEGVN_VMSTAT_FLTVNPAGES(27); 4079 ierr = anon_map_privatepages(amp, aindx, szc, 4080 seg, a, prot, ppa, vpage, segvn_anypgsz, 4081 tron ? PG_LOCAL : 0, svd->cred); 4082 if (ierr != 0) { 4083 SEGVN_VMSTAT_FLTVNPAGES(28); 4084 anon_array_exit(&an_cookie); 4085 ANON_LOCK_EXIT(&amp->a_rwlock); 4086 SEGVN_RESTORE_SOFTLOCK(type, pages); 4087 err = FC_MAKE_ERR(ierr); 4088 goto out; 4089 } 4090 4091 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4092 /* 4093 * p_szc can't be changed for locked 4094 * swapfs pages. 4095 */ 4096 ASSERT(svd->rcookie == 4097 HAT_INVALID_REGION_COOKIE); 4098 hat_memload_array(hat, a, pgsz, ppa, prot, 4099 hat_flag); 4100 4101 if (!(hat_flag & HAT_LOAD_LOCK)) { 4102 SEGVN_VMSTAT_FLTVNPAGES(29); 4103 for (i = 0; i < pages; i++) { 4104 page_unlock(ppa[i]); 4105 } 4106 } 4107 anon_array_exit(&an_cookie); 4108 ANON_LOCK_EXIT(&amp->a_rwlock); 4109 goto next; 4110 } 4111 4112 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4113 (!svd->pageprot && svd->prot == (prot & vpprot))); 4114 4115 pfn = page_pptonum(ppa[0]); 4116 /* 4117 * hat_page_demote() needs an SE_EXCL lock on one of 4118 * constituent page_t's and it decreases root's p_szc 4119 * last. This means if root's p_szc is equal to szc and 4120 * all its constituent pages are locked 4121 * hat_page_demote() that could have changed p_szc to 4122 * szc is already done and no new hat_page_demote() 4123 * can start for this large page. 4124 */ 4125 4126 /* 4127 * We need to make sure the same mapping size is used for 4128 * the same address range if there's a possibility the 4129 * address is already mapped, because the hat layer panics 4130 * when a translation is loaded for a range already 4131 * mapped with a different page size. We achieve this 4132 * by always using the largest page size possible, subject 4133 * to the constraints of page size, segment page size 4134 * and page alignment. Since mappings are invalidated 4135 * when those constraints change and make it 4136 * impossible to use a previously used mapping size, no 4137 * mapping size conflicts should happen.
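 *
 * As a rough illustration (hypothetical helper variable mapszc, not
 * part of the original code), the size that finally gets mapped
 * below boils down to the largest size that the page, the segment
 * and the virtual/physical alignment all permit:
 *
 *	mapszc = MIN(ppa[0]->p_szc, seg->s_szc);
 *	while (mapszc != 0 &&
 *	    (!IS_P2ALIGNED(a, page_get_pagesize(mapszc)) ||
 *	    !IS_P2ALIGNED(pfn, btop(page_get_pagesize(mapszc)))))
 *		mapszc--;
 *
 * The chkszc/upsize/downsize paths that follow implement this
 * incrementally rather than in one loop, so that exclusive locks and
 * preallocated replacement pages are only taken when a given size is
 * actually usable.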
4138 */ 4139 4140 chkszc: 4141 if ((pszc = ppa[0]->p_szc) == szc && 4142 IS_P2ALIGNED(pfn, pages)) { 4143 4144 SEGVN_VMSTAT_FLTVNPAGES(30); 4145 #ifdef DEBUG 4146 for (i = 0; i < pages; i++) { 4147 ASSERT(PAGE_LOCKED(ppa[i])); 4148 ASSERT(!PP_ISFREE(ppa[i])); 4149 ASSERT(page_pptonum(ppa[i]) == 4150 pfn + i); 4151 ASSERT(ppa[i]->p_szc == szc); 4152 ASSERT(ppa[i]->p_vnode == vp); 4153 ASSERT(ppa[i]->p_offset == 4154 off + (i << PAGESHIFT)); 4155 } 4156 #endif /* DEBUG */ 4157 /* 4158 * All pages are of szc we need and they are 4159 * all locked so they can't change szc. load 4160 * translations. 4161 * 4162 * if page got promoted since last check 4163 * we don't need pplist. 4164 */ 4165 if (pplist != NULL) { 4166 page_free_replacement_page(pplist); 4167 page_create_putback(pages); 4168 } 4169 if (PP_ISMIGRATE(ppa[0])) { 4170 page_migrate(seg, a, ppa, pages); 4171 } 4172 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4173 prot, vpprot); 4174 if (!xhat) { 4175 hat_memload_array_region(hat, a, pgsz, 4176 ppa, prot & vpprot, hat_flag, 4177 svd->rcookie); 4178 } else { 4179 /* 4180 * avoid large xhat mappings to FS 4181 * pages so that hat_page_demote() 4182 * doesn't need to check for xhat 4183 * large mappings. 4184 * Don't use regions with xhats. 4185 */ 4186 for (i = 0; i < pages; i++) { 4187 hat_memload(hat, 4188 a + (i << PAGESHIFT), 4189 ppa[i], prot & vpprot, 4190 hat_flag); 4191 } 4192 } 4193 4194 if (!(hat_flag & HAT_LOAD_LOCK)) { 4195 for (i = 0; i < pages; i++) { 4196 page_unlock(ppa[i]); 4197 } 4198 } 4199 if (amp != NULL) { 4200 anon_array_exit(&an_cookie); 4201 ANON_LOCK_EXIT(&->a_rwlock); 4202 } 4203 goto next; 4204 } 4205 4206 /* 4207 * See if upsize is possible. 4208 */ 4209 if (pszc > szc && szc < seg->s_szc && 4210 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4211 pgcnt_t aphase; 4212 uint_t pszc1 = MIN(pszc, seg->s_szc); 4213 ppgsz = page_get_pagesize(pszc1); 4214 ppages = btop(ppgsz); 4215 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4216 4217 ASSERT(type != F_SOFTLOCK); 4218 4219 SEGVN_VMSTAT_FLTVNPAGES(31); 4220 if (aphase != P2PHASE(pfn, ppages)) { 4221 segvn_faultvnmpss_align_err4++; 4222 } else { 4223 SEGVN_VMSTAT_FLTVNPAGES(32); 4224 if (pplist != NULL) { 4225 page_t *pl = pplist; 4226 page_free_replacement_page(pl); 4227 page_create_putback(pages); 4228 } 4229 for (i = 0; i < pages; i++) { 4230 page_unlock(ppa[i]); 4231 } 4232 if (amp != NULL) { 4233 anon_array_exit(&an_cookie); 4234 ANON_LOCK_EXIT(&->a_rwlock); 4235 } 4236 pszc = pszc1; 4237 ierr = -2; 4238 break; 4239 } 4240 } 4241 4242 /* 4243 * check if we should use smallest mapping size. 4244 */ 4245 upgrdfail = 0; 4246 if (szc == 0 || xhat || 4247 (pszc >= szc && 4248 !IS_P2ALIGNED(pfn, pages)) || 4249 (pszc < szc && 4250 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4251 &pszc))) { 4252 4253 if (upgrdfail && type != F_SOFTLOCK) { 4254 /* 4255 * segvn_full_szcpages failed to lock 4256 * all pages EXCL. Size down. 
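 *
 * (Illustrative note, assumed wording: breaking out of the inner
 * loop with ierr == -1 makes the outer loop retry this address
 * range with a smaller mapping size, roughly
 *
 *	szc = segvn_anypgsz_vnode ? szc - 1 : 0;
 *	pgsz = page_get_pagesize(szc);
 *	pages = btop(pgsz);
 *
 * so failing to lock every constituent page exclusively only costs
 * an extra pass at a smaller size, never correctness.)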
4257 */ 4258 ASSERT(pszc < szc); 4259 4260 SEGVN_VMSTAT_FLTVNPAGES(33); 4261 4262 if (pplist != NULL) { 4263 page_t *pl = pplist; 4264 page_free_replacement_page(pl); 4265 page_create_putback(pages); 4266 } 4267 4268 for (i = 0; i < pages; i++) { 4269 page_unlock(ppa[i]); 4270 } 4271 if (amp != NULL) { 4272 anon_array_exit(&an_cookie); 4273 ANON_LOCK_EXIT(&->a_rwlock); 4274 } 4275 ierr = -1; 4276 break; 4277 } 4278 if (szc != 0 && !xhat && !upgrdfail) { 4279 segvn_faultvnmpss_align_err5++; 4280 } 4281 SEGVN_VMSTAT_FLTVNPAGES(34); 4282 if (pplist != NULL) { 4283 page_free_replacement_page(pplist); 4284 page_create_putback(pages); 4285 } 4286 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4287 prot, vpprot); 4288 if (upgrdfail && segvn_anypgsz_vnode) { 4289 /* SOFTLOCK case */ 4290 hat_memload_array_region(hat, a, pgsz, 4291 ppa, prot & vpprot, hat_flag, 4292 svd->rcookie); 4293 } else { 4294 for (i = 0; i < pages; i++) { 4295 hat_memload_region(hat, 4296 a + (i << PAGESHIFT), 4297 ppa[i], prot & vpprot, 4298 hat_flag, svd->rcookie); 4299 } 4300 } 4301 if (!(hat_flag & HAT_LOAD_LOCK)) { 4302 for (i = 0; i < pages; i++) { 4303 page_unlock(ppa[i]); 4304 } 4305 } 4306 if (amp != NULL) { 4307 anon_array_exit(&an_cookie); 4308 ANON_LOCK_EXIT(&->a_rwlock); 4309 } 4310 goto next; 4311 } 4312 4313 if (pszc == szc) { 4314 /* 4315 * segvn_full_szcpages() upgraded pages szc. 4316 */ 4317 ASSERT(pszc == ppa[0]->p_szc); 4318 ASSERT(IS_P2ALIGNED(pfn, pages)); 4319 goto chkszc; 4320 } 4321 4322 if (pszc > szc) { 4323 kmutex_t *szcmtx; 4324 SEGVN_VMSTAT_FLTVNPAGES(35); 4325 /* 4326 * p_szc of ppa[0] can change since we haven't 4327 * locked all constituent pages. Call 4328 * page_lock_szc() to prevent szc changes. 4329 * This should be a rare case that happens when 4330 * multiple segments use a different page size 4331 * to map the same file offsets. 4332 */ 4333 szcmtx = page_szc_lock(ppa[0]); 4334 pszc = ppa[0]->p_szc; 4335 ASSERT(szcmtx != NULL || pszc == 0); 4336 ASSERT(ppa[0]->p_szc <= pszc); 4337 if (pszc <= szc) { 4338 SEGVN_VMSTAT_FLTVNPAGES(36); 4339 if (szcmtx != NULL) { 4340 mutex_exit(szcmtx); 4341 } 4342 goto chkszc; 4343 } 4344 if (pplist != NULL) { 4345 /* 4346 * page got promoted since last check. 4347 * we don't need preaalocated large 4348 * page. 4349 */ 4350 SEGVN_VMSTAT_FLTVNPAGES(37); 4351 page_free_replacement_page(pplist); 4352 page_create_putback(pages); 4353 } 4354 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4355 prot, vpprot); 4356 hat_memload_array_region(hat, a, pgsz, ppa, 4357 prot & vpprot, hat_flag, svd->rcookie); 4358 mutex_exit(szcmtx); 4359 if (!(hat_flag & HAT_LOAD_LOCK)) { 4360 for (i = 0; i < pages; i++) { 4361 page_unlock(ppa[i]); 4362 } 4363 } 4364 if (amp != NULL) { 4365 anon_array_exit(&an_cookie); 4366 ANON_LOCK_EXIT(&->a_rwlock); 4367 } 4368 goto next; 4369 } 4370 4371 /* 4372 * if page got demoted since last check 4373 * we could have not allocated larger page. 4374 * allocate now. 
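 *
 * (Sketch of the bookkeeping used on failure, restating the
 * surrounding code rather than adding new logic: allocation
 * failures are remembered per page size in a bitmask,
 *
 *	alloc_failed |= (1 << szc);
 *	...
 *	if ((alloc_failed >> tszc) & 0x1)
 *		do not re-attempt tszc in the adjszc_chk upsizing path
 *
 * which keeps the fault loop from repeatedly retrying sizes that
 * already failed to allocate for this fault.)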
4375 */ 4376 if (pplist == NULL && 4377 page_alloc_pages(vp, seg, a, &pplist, NULL, 4378 szc, 0, 0) && type != F_SOFTLOCK) { 4379 SEGVN_VMSTAT_FLTVNPAGES(38); 4380 for (i = 0; i < pages; i++) { 4381 page_unlock(ppa[i]); 4382 } 4383 if (amp != NULL) { 4384 anon_array_exit(&an_cookie); 4385 ANON_LOCK_EXIT(&amp->a_rwlock); 4386 } 4387 ierr = -1; 4388 alloc_failed |= (1 << szc); 4389 break; 4390 } 4391 4392 SEGVN_VMSTAT_FLTVNPAGES(39); 4393 4394 if (pplist != NULL) { 4395 segvn_relocate_pages(ppa, pplist); 4396 #ifdef DEBUG 4397 } else { 4398 ASSERT(type == F_SOFTLOCK); 4399 SEGVN_VMSTAT_FLTVNPAGES(40); 4400 #endif /* DEBUG */ 4401 } 4402 4403 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4404 4405 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4406 ASSERT(type == F_SOFTLOCK); 4407 for (i = 0; i < pages; i++) { 4408 ASSERT(ppa[i]->p_szc < szc); 4409 hat_memload_region(hat, 4410 a + (i << PAGESHIFT), 4411 ppa[i], prot & vpprot, hat_flag, 4412 svd->rcookie); 4413 } 4414 } else { 4415 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4416 hat_memload_array_region(hat, a, pgsz, ppa, 4417 prot & vpprot, hat_flag, svd->rcookie); 4418 } 4419 if (!(hat_flag & HAT_LOAD_LOCK)) { 4420 for (i = 0; i < pages; i++) { 4421 ASSERT(PAGE_SHARED(ppa[i])); 4422 page_unlock(ppa[i]); 4423 } 4424 } 4425 if (amp != NULL) { 4426 anon_array_exit(&an_cookie); 4427 ANON_LOCK_EXIT(&amp->a_rwlock); 4428 } 4429 4430 next: 4431 if (vpage != NULL) { 4432 vpage += pages; 4433 } 4434 adjszc_chk = 1; 4435 } 4436 if (a == lpgeaddr) 4437 break; 4438 ASSERT(a < lpgeaddr); 4439 4440 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4441 4442 /* 4443 * ierr == -1 means we failed to map with a large page 4444 * (either due to allocation/relocation failures or 4445 * misalignment with other mappings to this file). 4446 * 4447 * ierr == -2 means some other thread allocated a large page 4448 * after we gave up trying to map with a large page. Retry 4449 * with a larger mapping. 4450 */ 4451 ASSERT(ierr == -1 || ierr == -2); 4452 ASSERT(ierr == -2 || szc != 0); 4453 ASSERT(ierr == -1 || szc < seg->s_szc); 4454 if (ierr == -2) { 4455 SEGVN_VMSTAT_FLTVNPAGES(41); 4456 ASSERT(pszc > szc && pszc <= seg->s_szc); 4457 szc = pszc; 4458 } else if (segvn_anypgsz_vnode) { 4459 SEGVN_VMSTAT_FLTVNPAGES(42); 4460 szc--; 4461 } else { 4462 SEGVN_VMSTAT_FLTVNPAGES(43); 4463 ASSERT(pszc < szc); 4464 /* 4465 * Another process created a pszc large page, 4466 * but we still have to drop to szc 0. 4467 */ 4468 szc = 0; 4469 } 4470 4471 pgsz = page_get_pagesize(szc); 4472 pages = btop(pgsz); 4473 if (ierr == -2) { 4474 /* 4475 * Size up case. Note lpgaddr may only be needed for 4476 * softlock case so we don't adjust it here. 4477 */ 4478 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4479 ASSERT(a >= lpgaddr); 4480 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4481 off = svd->offset + (uintptr_t)(a - seg->s_base); 4482 aindx = svd->anon_index + seg_page(seg, a); 4483 vpage = (svd->vpage != NULL) ? 4484 &svd->vpage[seg_page(seg, a)] : NULL; 4485 } else { 4486 /* 4487 * Size down case. Note lpgaddr may only be needed for 4488 * softlock case so we don't adjust it here. 4489 */ 4490 ASSERT(IS_P2ALIGNED(a, pgsz)); 4491 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4492 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4493 ASSERT(a < lpgeaddr); 4494 if (a < addr) { 4495 SEGVN_VMSTAT_FLTVNPAGES(44); 4496 /* 4497 * The beginning of the large page region can 4498 * be pulled to the right to make a smaller 4499 * region. We haven't yet faulted a single 4500 * page.
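 *
 * A hedged numeric example (illustrative values only): if the
 * faulting addr is 0x10e000, the old 4M-aligned start was
 * a == 0x000000 and the new pgsz is 64K, then
 *
 *	a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);	now 0x100000
 *
 * so the retry begins at the 64K boundary containing addr instead
 * of the old 4M boundary, and off/aindx/vpage are recomputed from
 * the new a just below.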
4501 */ 4502 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4503 ASSERT(a >= lpgaddr); 4504 off = svd->offset + 4505 (uintptr_t)(a - seg->s_base); 4506 aindx = svd->anon_index + seg_page(seg, a); 4507 vpage = (svd->vpage != NULL) ? 4508 &svd->vpage[seg_page(seg, a)] : NULL; 4509 } 4510 } 4511 } 4512 out: 4513 kmem_free(ppa, ppasize); 4514 if (!err && !vop_size_err) { 4515 SEGVN_VMSTAT_FLTVNPAGES(45); 4516 return (0); 4517 } 4518 if (type == F_SOFTLOCK && a > lpgaddr) { 4519 SEGVN_VMSTAT_FLTVNPAGES(46); 4520 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4521 } 4522 if (!vop_size_err) { 4523 SEGVN_VMSTAT_FLTVNPAGES(47); 4524 return (err); 4525 } 4526 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4527 /* 4528 * Large page end is mapped beyond the end of file and it's a cow 4529 * fault (can be a text replication induced cow) or softlock so we can't 4530 * reduce the map area. For now just demote the segment. This should 4531 * really only happen if the end of the file changed after the mapping 4532 * was established since when large page segments are created we make 4533 * sure they don't extend beyond the end of the file. 4534 */ 4535 SEGVN_VMSTAT_FLTVNPAGES(48); 4536 4537 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4538 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4539 err = 0; 4540 if (seg->s_szc != 0) { 4541 segvn_fltvnpages_clrszc_cnt++; 4542 ASSERT(svd->softlockcnt == 0); 4543 err = segvn_clrszc(seg); 4544 if (err != 0) { 4545 segvn_fltvnpages_clrszc_err++; 4546 } 4547 } 4548 ASSERT(err || seg->s_szc == 0); 4549 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4550 /* segvn_fault will do its job as if szc had been zero to begin with */ 4551 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4552 } 4553 4554 /* 4555 * This routine will attempt to fault in one large page. 4556 * it will use smaller pages if that fails. 4557 * It should only be called for pure anonymous segments. 4558 */ 4559 static faultcode_t 4560 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4561 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4562 caddr_t eaddr, int brkcow) 4563 { 4564 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4565 struct anon_map *amp = svd->amp; 4566 uchar_t segtype = svd->type; 4567 uint_t szc = seg->s_szc; 4568 size_t pgsz = page_get_pagesize(szc); 4569 size_t maxpgsz = pgsz; 4570 pgcnt_t pages = btop(pgsz); 4571 size_t ppasize = pages * sizeof (page_t *); 4572 caddr_t a = lpgaddr; 4573 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4574 struct vpage *vpage = (svd->vpage != NULL) ? 4575 &svd->vpage[seg_page(seg, a)] : NULL; 4576 page_t **ppa; 4577 uint_t ppa_szc; 4578 faultcode_t err; 4579 int ierr; 4580 uint_t protchk, prot, vpprot; 4581 ulong_t i; 4582 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4583 anon_sync_obj_t cookie; 4584 int first = 1; 4585 int adjszc_chk; 4586 int purged = 0; 4587 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4588 4589 ASSERT(szc != 0); 4590 ASSERT(amp != NULL); 4591 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4592 ASSERT(!(svd->flags & MAP_NORESERVE)); 4593 ASSERT(type != F_SOFTUNLOCK); 4594 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4595 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4596 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4597 4598 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4599 4600 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4601 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4602 4603 if (svd->flags & MAP_TEXT) { 4604 hat_flag |= HAT_LOAD_TEXT; 4605 } 4606 4607 if (svd->pageprot) { 4608 switch (rw) { 4609 case S_READ: 4610 protchk = PROT_READ; 4611 break; 4612 case S_WRITE: 4613 protchk = PROT_WRITE; 4614 break; 4615 case S_EXEC: 4616 protchk = PROT_EXEC; 4617 break; 4618 case S_OTHER: 4619 default: 4620 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4621 break; 4622 } 4623 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4624 } else { 4625 prot = svd->prot; 4626 /* caller has already done segment level protection check. */ 4627 } 4628 4629 ppa = kmem_alloc(ppasize, KM_SLEEP); 4630 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4631 for (;;) { 4632 adjszc_chk = 0; 4633 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4634 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4635 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4636 ASSERT(vpage != NULL); 4637 prot = VPP_PROT(vpage); 4638 ASSERT(sameprot(seg, a, maxpgsz)); 4639 if ((prot & protchk) == 0) { 4640 err = FC_PROT; 4641 goto error; 4642 } 4643 } 4644 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4645 pgsz < maxpgsz) { 4646 ASSERT(a > lpgaddr); 4647 szc = seg->s_szc; 4648 pgsz = maxpgsz; 4649 pages = btop(pgsz); 4650 ASSERT(IS_P2ALIGNED(aindx, pages)); 4651 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4652 pgsz); 4653 } 4654 if (type == F_SOFTLOCK && svd->vp != NULL) { 4655 mutex_enter(&freemem_lock); 4656 if (availrmem < tune.t_minarmem + pages) { 4657 mutex_exit(&freemem_lock); 4658 err = FC_MAKE_ERR(ENOMEM); 4659 goto error; 4660 } else { 4661 availrmem -= pages; 4662 segvn_pages_locked += pages; 4663 svd->softlockcnt += pages; 4664 } 4665 mutex_exit(&freemem_lock); 4666 } 4667 anon_array_enter(amp, aindx, &cookie); 4668 ppa_szc = (uint_t)-1; 4669 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4670 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4671 segvn_anypgsz, pgflags, svd->cred); 4672 if (ierr != 0) { 4673 anon_array_exit(&cookie); 4674 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4675 if (type == F_SOFTLOCK && svd->vp != NULL) { 4676 VM_STAT_ADD(segvnvmstats.fltanpages[5]); 4677 mutex_enter(&freemem_lock); 4678 availrmem += pages; 4679 segvn_pages_locked -= pages; 4680 svd->softlockcnt -= pages; 4681 mutex_exit(&freemem_lock); 4682 } 4683 if (ierr > 0) { 4684 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4685 err = FC_MAKE_ERR(ierr); 4686 goto error; 4687 } 4688 break; 4689 } 4690 4691 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4692 4693 ASSERT(segtype == MAP_SHARED || 4694 ppa[0]->p_szc <= szc); 4695 ASSERT(segtype == MAP_PRIVATE || 4696 ppa[0]->p_szc >= szc); 4697 4698 /* 4699 * Handle pages that have been marked for migration 4700 */ 4701 if (lgrp_optimizations()) 4702 page_migrate(seg, a, ppa, pages); 4703 4704 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4705 if (type == F_SOFTLOCK && svd->vp == NULL) { 4706 /* 4707 * All pages in ppa array belong to the same 4708 * large page. 
This means it's ok to call 4709 * segvn_pp_lock_anonpages just for ppa[0]. 4710 */ 4711 if (!segvn_pp_lock_anonpages(ppa[0], first)) { 4712 for (i = 0; i < pages; i++) { 4713 page_unlock(ppa[i]); 4714 } 4715 err = FC_MAKE_ERR(ENOMEM); 4716 goto error; 4717 } 4718 first = 0; 4719 mutex_enter(&freemem_lock); 4720 svd->softlockcnt += pages; 4721 segvn_pages_locked += pages; 4722 mutex_exit(&freemem_lock); 4723 } 4724 4725 if (segtype == MAP_SHARED) { 4726 vpprot |= PROT_WRITE; 4727 } 4728 4729 hat_memload_array(hat, a, pgsz, ppa, 4730 prot & vpprot, hat_flag); 4731 4732 if (hat_flag & HAT_LOAD_LOCK) { 4733 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4734 } else { 4735 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4736 for (i = 0; i < pages; i++) 4737 page_unlock(ppa[i]); 4738 } 4739 if (vpage != NULL) 4740 vpage += pages; 4741 4742 anon_array_exit(&cookie); 4743 adjszc_chk = 1; 4744 } 4745 if (a == lpgeaddr) 4746 break; 4747 ASSERT(a < lpgeaddr); 4748 /* 4749 * ierr == -1 means we failed to allocate a large page. 4750 * so do a size down operation. 4751 * 4752 * ierr == -2 means some other process that privately shares 4753 * pages with this process has allocated a larger page and we 4754 * need to retry with larger pages. So do a size up 4755 * operation. This relies on the fact that large pages are 4756 * never partially shared i.e. if we share any constituent 4757 * page of a large page with another process we must share the 4758 * entire large page. Note this cannot happen for SOFTLOCK 4759 * case, unless current address (a) is at the beginning of the 4760 * next page size boundary because the other process couldn't 4761 * have relocated locked pages. 4762 */ 4763 ASSERT(ierr == -1 || ierr == -2); 4764 /* 4765 * For the very first relocation failure try to purge this 4766 * segment's cache so that the relocator can obtain an 4767 * exclusive lock on pages we want to relocate. 4768 */ 4769 if (!purged && ierr == -1 && ppa_szc != (uint_t)-1 && 4770 svd->softlockcnt != 0) { 4771 purged = 1; 4772 segvn_purge(seg); 4773 continue; 4774 } 4775 4776 if (segvn_anypgsz) { 4777 ASSERT(ierr == -2 || szc != 0); 4778 ASSERT(ierr == -1 || szc < seg->s_szc); 4779 szc = (ierr == -1) ? szc - 1 : szc + 1; 4780 } else { 4781 /* 4782 * For non COW faults and segvn_anypgsz == 0 4783 * we need to be careful not to loop forever 4784 * if existing page is found with szc other 4785 * than 0 or seg->s_szc. This could be due 4786 * to page relocations on behalf of DR or 4787 * more likely large page creation. For this 4788 * case simply re-size to existing page's szc 4789 * if returned by anon_map_getpages(). 4790 */ 4791 if (ppa_szc == (uint_t)-1) { 4792 szc = (ierr == -1) ? 0 : seg->s_szc; 4793 } else { 4794 ASSERT(ppa_szc <= seg->s_szc); 4795 ASSERT(ierr == -2 || ppa_szc < szc); 4796 ASSERT(ierr == -1 || ppa_szc > szc); 4797 szc = ppa_szc; 4798 } 4799 } 4800 4801 pgsz = page_get_pagesize(szc); 4802 pages = btop(pgsz); 4803 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4804 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4805 if (type == F_SOFTLOCK) { 4806 /* 4807 * For softlocks we cannot reduce the fault area 4808 * (calculated based on the largest page size for this 4809 * segment) for size down and a is already next 4810 * page size aligned as assertted above for size 4811 * ups. Therefore just continue in case of softlock. 4812 */ 4813 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4814 continue; /* keep lint happy */ 4815 } else if (ierr == -2) { 4816 4817 /* 4818 * Size up case. 
Note lpgaddr may only be needed for 4819 * softlock case so we don't adjust it here. 4820 */ 4821 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4822 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4823 ASSERT(a >= lpgaddr); 4824 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4825 aindx = svd->anon_index + seg_page(seg, a); 4826 vpage = (svd->vpage != NULL) ? 4827 &svd->vpage[seg_page(seg, a)] : NULL; 4828 } else { 4829 /* 4830 * Size down case. Note lpgaddr may only be needed for 4831 * softlock case so we don't adjust it here. 4832 */ 4833 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4834 ASSERT(IS_P2ALIGNED(a, pgsz)); 4835 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4836 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4837 ASSERT(a < lpgeaddr); 4838 if (a < addr) { 4839 /* 4840 * The beginning of the large page region can 4841 * be pulled to the right to make a smaller 4842 * region. We haven't yet faulted a single 4843 * page. 4844 */ 4845 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4846 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4847 ASSERT(a >= lpgaddr); 4848 aindx = svd->anon_index + seg_page(seg, a); 4849 vpage = (svd->vpage != NULL) ? 4850 &svd->vpage[seg_page(seg, a)] : NULL; 4851 } 4852 } 4853 } 4854 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4855 ANON_LOCK_EXIT(&amp->a_rwlock); 4856 kmem_free(ppa, ppasize); 4857 return (0); 4858 error: 4859 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4860 ANON_LOCK_EXIT(&amp->a_rwlock); 4861 kmem_free(ppa, ppasize); 4862 if (type == F_SOFTLOCK && a > lpgaddr) { 4863 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4864 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4865 } 4866 return (err); 4867 } 4868 4869 int fltadvice = 1; /* set to free behind pages for sequential access */ 4870 4871 /* 4872 * This routine is called via a machine specific fault handling routine. 4873 * It is also called by software routines wishing to lock or unlock 4874 * a range of addresses.
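 *
 * As an illustration of that lock/unlock use (a sketch with assumed
 * arguments, not taken from an actual caller), such a caller
 * normally goes through the address space layer and brackets its
 * access like:
 *
 *	if (as_fault(as->a_hat, as, addr, len, F_SOFTLOCK, S_READ) == 0) {
 *		... access the now softlocked range ...
 *		(void) as_fault(as->a_hat, as, addr, len,
 *		    F_SOFTUNLOCK, S_READ);
 *	}
 *
 * rather than invoking the segment driver directly.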
4875 * 4876 * Here is the basic algorithm: 4877 * If unlocking 4878 * Call segvn_softunlock 4879 * Return 4880 * endif 4881 * Checking and set up work 4882 * If we will need some non-anonymous pages 4883 * Call VOP_GETPAGE over the range of non-anonymous pages 4884 * endif 4885 * Loop over all addresses requested 4886 * Call segvn_faultpage passing in page list 4887 * to load up translations and handle anonymous pages 4888 * endloop 4889 * Load up translation to any additional pages in page list not 4890 * already handled that fit into this segment 4891 */ 4892 static faultcode_t 4893 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4894 enum fault_type type, enum seg_rw rw) 4895 { 4896 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4897 page_t **plp, **ppp, *pp; 4898 u_offset_t off; 4899 caddr_t a; 4900 struct vpage *vpage; 4901 uint_t vpprot, prot; 4902 int err; 4903 page_t *pl[PVN_GETPAGE_NUM + 1]; 4904 size_t plsz, pl_alloc_sz; 4905 size_t page; 4906 ulong_t anon_index; 4907 struct anon_map *amp; 4908 int dogetpage = 0; 4909 caddr_t lpgaddr, lpgeaddr; 4910 size_t pgsz; 4911 anon_sync_obj_t cookie; 4912 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4913 4914 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4915 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4916 4917 /* 4918 * First handle the easy stuff 4919 */ 4920 if (type == F_SOFTUNLOCK) { 4921 if (rw == S_READ_NOCOW) { 4922 rw = S_READ; 4923 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4924 } 4925 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4926 pgsz = (seg->s_szc == 0) ? PAGESIZE : 4927 page_get_pagesize(seg->s_szc); 4928 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4929 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4930 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4931 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4932 return (0); 4933 } 4934 4935 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4936 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4937 if (brkcow == 0) { 4938 if (svd->tr_state == SEGVN_TR_INIT) { 4939 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4940 if (svd->tr_state == SEGVN_TR_INIT) { 4941 ASSERT(svd->vp != NULL && svd->amp == NULL); 4942 ASSERT(svd->flags & MAP_TEXT); 4943 ASSERT(svd->type == MAP_PRIVATE); 4944 segvn_textrepl(seg); 4945 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4946 ASSERT(svd->tr_state != SEGVN_TR_ON || 4947 svd->amp != NULL); 4948 } 4949 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4950 } 4951 } else if (svd->tr_state != SEGVN_TR_OFF) { 4952 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4953 4954 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4955 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4956 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4957 return (FC_PROT); 4958 } 4959 4960 if (svd->tr_state == SEGVN_TR_ON) { 4961 ASSERT(svd->vp != NULL && svd->amp != NULL); 4962 segvn_textunrepl(seg, 0); 4963 ASSERT(svd->amp == NULL && 4964 svd->tr_state == SEGVN_TR_OFF); 4965 } else if (svd->tr_state != SEGVN_TR_OFF) { 4966 svd->tr_state = SEGVN_TR_OFF; 4967 } 4968 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4969 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4970 } 4971 4972 top: 4973 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4974 4975 /* 4976 * If we have the same protections for the entire segment, 4977 * insure that the access being attempted is legitimate. 
4978 */ 4979 4980 if (svd->pageprot == 0) { 4981 uint_t protchk; 4982 4983 switch (rw) { 4984 case S_READ: 4985 case S_READ_NOCOW: 4986 protchk = PROT_READ; 4987 break; 4988 case S_WRITE: 4989 protchk = PROT_WRITE; 4990 break; 4991 case S_EXEC: 4992 protchk = PROT_EXEC; 4993 break; 4994 case S_OTHER: 4995 default: 4996 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4997 break; 4998 } 4999 5000 if ((svd->prot & protchk) == 0) { 5001 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5002 return (FC_PROT); /* illegal access type */ 5003 } 5004 } 5005 5006 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5007 /* this must be SOFTLOCK S_READ fault */ 5008 ASSERT(svd->amp == NULL); 5009 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5010 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5011 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5012 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5013 /* 5014 * this must be the first ever non S_READ_NOCOW 5015 * softlock for this segment. 5016 */ 5017 ASSERT(svd->softlockcnt == 0); 5018 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5019 HAT_REGION_TEXT); 5020 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5021 } 5022 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5023 goto top; 5024 } 5025 5026 /* 5027 * We can't allow the long term use of softlocks for vmpss segments, 5028 * because in some file truncation cases we should be able to demote 5029 * the segment, which requires that there are no softlocks. The 5030 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5031 * segment is S_READ_NOCOW, where the caller holds the address space 5032 * locked as writer and calls softunlock before dropping the as lock. 5033 * S_READ_NOCOW is used by /proc to read memory from another user. 5034 * 5035 * Another deadlock between SOFTLOCK and file truncation can happen 5036 * because segvn_fault_vnodepages() calls the FS one pagesize at 5037 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5038 * can cause a deadlock because the first set of page_t's remain 5039 * locked SE_SHARED. To avoid this, we demote segments on a first 5040 * SOFTLOCK if they have a length greater than the segment's 5041 * page size. 5042 * 5043 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5044 * the access type is S_READ_NOCOW and the fault length is less than 5045 * or equal to the segment's page size. While this is quite restrictive, 5046 * it should be the most common case of SOFTLOCK against a vmpss 5047 * segment. 5048 * 5049 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5050 * caller makes sure no COW will be caused by another thread for a 5051 * softlocked page. 
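 *
 * Condensed, purely illustrative pseudo-code of the check that
 * follows (the real code is below):
 *
 *	if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0 &&
 *	    (rw != S_READ_NOCOW ||
 *	    the fault region spans more than one large page))
 *		demote the segment to PAGESIZE and retry the fault;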
5052 */ 5053 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5054 int demote = 0; 5055 5056 if (rw != S_READ_NOCOW) { 5057 demote = 1; 5058 } 5059 if (!demote && len > PAGESIZE) { 5060 pgsz = page_get_pagesize(seg->s_szc); 5061 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5062 lpgeaddr); 5063 if (lpgeaddr - lpgaddr > pgsz) { 5064 demote = 1; 5065 } 5066 } 5067 5068 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5069 5070 if (demote) { 5071 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5072 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5073 if (seg->s_szc != 0) { 5074 segvn_vmpss_clrszc_cnt++; 5075 ASSERT(svd->softlockcnt == 0); 5076 err = segvn_clrszc(seg); 5077 if (err) { 5078 segvn_vmpss_clrszc_err++; 5079 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5080 return (FC_MAKE_ERR(err)); 5081 } 5082 } 5083 ASSERT(seg->s_szc == 0); 5084 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5085 goto top; 5086 } 5087 } 5088 5089 /* 5090 * Check to see if we need to allocate an anon_map structure. 5091 */ 5092 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5093 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5094 /* 5095 * Drop the "read" lock on the segment and acquire 5096 * the "write" version since we have to allocate the 5097 * anon_map. 5098 */ 5099 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5100 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5101 5102 if (svd->amp == NULL) { 5103 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5104 svd->amp->a_szc = seg->s_szc; 5105 } 5106 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5107 5108 /* 5109 * Start all over again since segment protections 5110 * may have changed after we dropped the "read" lock. 5111 */ 5112 goto top; 5113 } 5114 5115 /* 5116 * S_READ_NOCOW vs S_READ distinction was 5117 * only needed for the code above. After 5118 * that we treat it as S_READ. 5119 */ 5120 if (rw == S_READ_NOCOW) { 5121 ASSERT(type == F_SOFTLOCK); 5122 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5123 rw = S_READ; 5124 } 5125 5126 amp = svd->amp; 5127 5128 /* 5129 * MADV_SEQUENTIAL work is ignored for large page segments. 5130 */ 5131 if (seg->s_szc != 0) { 5132 pgsz = page_get_pagesize(seg->s_szc); 5133 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5134 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5135 if (svd->vp == NULL) { 5136 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5137 lpgeaddr, type, rw, addr, addr + len, brkcow); 5138 } else { 5139 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5140 lpgeaddr, type, rw, addr, addr + len, brkcow); 5141 if (err == IE_RETRY) { 5142 ASSERT(seg->s_szc == 0); 5143 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5144 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5145 goto top; 5146 } 5147 } 5148 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5149 return (err); 5150 } 5151 5152 page = seg_page(seg, addr); 5153 if (amp != NULL) { 5154 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5155 anon_index = svd->anon_index + page; 5156 5157 if (type == F_PROT && rw == S_READ && 5158 svd->tr_state == SEGVN_TR_OFF && 5159 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5160 size_t index = anon_index; 5161 struct anon *ap; 5162 5163 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5164 /* 5165 * The fast path could apply to S_WRITE also, except 5166 * that the protection fault could be caused by lazy 5167 * tlb flush when ro->rw. In this case, the pte is 5168 * RW already. But RO in the other cpu's tlb causes 5169 * the fault. 
Since hat_chgprot won't do anything if 5170 * pte doesn't change, we may end up faulting 5171 * indefinitely until the RO tlb entry gets replaced. 5172 */ 5173 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5174 anon_array_enter(amp, index, &cookie); 5175 ap = anon_get_ptr(amp->ahp, index); 5176 anon_array_exit(&cookie); 5177 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5178 ANON_LOCK_EXIT(&->a_rwlock); 5179 goto slow; 5180 } 5181 } 5182 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5183 ANON_LOCK_EXIT(&->a_rwlock); 5184 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5185 return (0); 5186 } 5187 } 5188 slow: 5189 5190 if (svd->vpage == NULL) 5191 vpage = NULL; 5192 else 5193 vpage = &svd->vpage[page]; 5194 5195 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5196 5197 /* 5198 * If MADV_SEQUENTIAL has been set for the particular page we 5199 * are faulting on, free behind all pages in the segment and put 5200 * them on the free list. 5201 */ 5202 5203 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5204 struct vpage *vpp; 5205 ulong_t fanon_index; 5206 size_t fpage; 5207 u_offset_t pgoff, fpgoff; 5208 struct vnode *fvp; 5209 struct anon *fap = NULL; 5210 5211 if (svd->advice == MADV_SEQUENTIAL || 5212 (svd->pageadvice && 5213 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5214 pgoff = off - PAGESIZE; 5215 fpage = page - 1; 5216 if (vpage != NULL) 5217 vpp = &svd->vpage[fpage]; 5218 if (amp != NULL) 5219 fanon_index = svd->anon_index + fpage; 5220 5221 while (pgoff > svd->offset) { 5222 if (svd->advice != MADV_SEQUENTIAL && 5223 (!svd->pageadvice || (vpage && 5224 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5225 break; 5226 5227 /* 5228 * If this is an anon page, we must find the 5229 * correct <vp, offset> for it 5230 */ 5231 fap = NULL; 5232 if (amp != NULL) { 5233 ANON_LOCK_ENTER(&->a_rwlock, 5234 RW_READER); 5235 anon_array_enter(amp, fanon_index, 5236 &cookie); 5237 fap = anon_get_ptr(amp->ahp, 5238 fanon_index); 5239 if (fap != NULL) { 5240 swap_xlate(fap, &fvp, &fpgoff); 5241 } else { 5242 fpgoff = pgoff; 5243 fvp = svd->vp; 5244 } 5245 anon_array_exit(&cookie); 5246 ANON_LOCK_EXIT(&->a_rwlock); 5247 } else { 5248 fpgoff = pgoff; 5249 fvp = svd->vp; 5250 } 5251 if (fvp == NULL) 5252 break; /* XXX */ 5253 /* 5254 * Skip pages that are free or have an 5255 * "exclusive" lock. 5256 */ 5257 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5258 if (pp == NULL) 5259 break; 5260 /* 5261 * We don't need the page_struct_lock to test 5262 * as this is only advisory; even if we 5263 * acquire it someone might race in and lock 5264 * the page after we unlock and before the 5265 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5266 */ 5267 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5268 /* 5269 * Hold the vnode before releasing 5270 * the page lock to prevent it from 5271 * being freed and re-used by some 5272 * other thread. 5273 */ 5274 VN_HOLD(fvp); 5275 page_unlock(pp); 5276 /* 5277 * We should build a page list 5278 * to kluster putpages XXX 5279 */ 5280 (void) VOP_PUTPAGE(fvp, 5281 (offset_t)fpgoff, PAGESIZE, 5282 (B_DONTNEED|B_FREE|B_ASYNC), 5283 svd->cred); 5284 VN_RELE(fvp); 5285 } else { 5286 /* 5287 * XXX - Should the loop terminate if 5288 * the page is `locked'? 5289 */ 5290 page_unlock(pp); 5291 } 5292 --vpp; 5293 --fanon_index; 5294 pgoff -= PAGESIZE; 5295 } 5296 } 5297 } 5298 5299 plp = pl; 5300 *plp = NULL; 5301 pl_alloc_sz = 0; 5302 5303 /* 5304 * See if we need to call VOP_GETPAGE for 5305 * *any* of the range being faulted on. 
5306 * We can skip all of this work if there 5307 * was no original vnode. 5308 */ 5309 if (svd->vp != NULL) { 5310 u_offset_t vp_off; 5311 size_t vp_len; 5312 struct anon *ap; 5313 vnode_t *vp; 5314 5315 vp_off = off; 5316 vp_len = len; 5317 5318 if (amp == NULL) 5319 dogetpage = 1; 5320 else { 5321 /* 5322 * Only acquire reader lock to prevent amp->ahp 5323 * from being changed. It's ok to miss pages, 5324 * hence we don't do anon_array_enter 5325 */ 5326 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5327 ap = anon_get_ptr(amp->ahp, anon_index); 5328 5329 if (len <= PAGESIZE) 5330 /* inline non_anon() */ 5331 dogetpage = (ap == NULL); 5332 else 5333 dogetpage = non_anon(amp->ahp, anon_index, 5334 &vp_off, &vp_len); 5335 ANON_LOCK_EXIT(&->a_rwlock); 5336 } 5337 5338 if (dogetpage) { 5339 enum seg_rw arw; 5340 struct as *as = seg->s_as; 5341 5342 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5343 /* 5344 * Page list won't fit in local array, 5345 * allocate one of the needed size. 5346 */ 5347 pl_alloc_sz = 5348 (btop(len) + 1) * sizeof (page_t *); 5349 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5350 plp[0] = NULL; 5351 plsz = len; 5352 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5353 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5354 (((size_t)(addr + PAGESIZE) < 5355 (size_t)(seg->s_base + seg->s_size)) && 5356 hat_probe(as->a_hat, addr + PAGESIZE))) { 5357 /* 5358 * Ask VOP_GETPAGE to return the exact number 5359 * of pages if 5360 * (a) this is a COW fault, or 5361 * (b) this is a software fault, or 5362 * (c) next page is already mapped. 5363 */ 5364 plsz = len; 5365 } else { 5366 /* 5367 * Ask VOP_GETPAGE to return adjacent pages 5368 * within the segment. 5369 */ 5370 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5371 ((seg->s_base + seg->s_size) - addr)); 5372 ASSERT((addr + plsz) <= 5373 (seg->s_base + seg->s_size)); 5374 } 5375 5376 /* 5377 * Need to get some non-anonymous pages. 5378 * We need to make only one call to GETPAGE to do 5379 * this to prevent certain deadlocking conditions 5380 * when we are doing locking. In this case 5381 * non_anon() should have picked up the smallest 5382 * range which includes all the non-anonymous 5383 * pages in the requested range. We have to 5384 * be careful regarding which rw flag to pass in 5385 * because on a private mapping, the underlying 5386 * object is never allowed to be written. 5387 */ 5388 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5389 arw = S_READ; 5390 } else { 5391 arw = rw; 5392 } 5393 vp = svd->vp; 5394 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5395 "segvn_getpage:seg %p addr %p vp %p", 5396 seg, addr, vp); 5397 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5398 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5399 svd->cred); 5400 if (err) { 5401 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5402 segvn_pagelist_rele(plp); 5403 if (pl_alloc_sz) 5404 kmem_free(plp, pl_alloc_sz); 5405 return (FC_MAKE_ERR(err)); 5406 } 5407 if (svd->type == MAP_PRIVATE) 5408 vpprot &= ~PROT_WRITE; 5409 } 5410 } 5411 5412 /* 5413 * N.B. at this time the plp array has all the needed non-anon 5414 * pages in addition to (possibly) having some adjacent pages. 5415 */ 5416 5417 /* 5418 * Always acquire the anon_array_lock to prevent 5419 * 2 threads from allocating separate anon slots for 5420 * the same "addr". 
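 *
 * The per-slot serialization pattern used for that (an illustrative
 * sketch built from calls that appear throughout this file) is:
 *
 *	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 *	anon_array_enter(amp, anon_index, &cookie);
 *	ap = anon_get_ptr(amp->ahp, anon_index);
 *	... allocate or COW the slot exactly once ...
 *	anon_array_exit(&cookie);
 *	ANON_LOCK_EXIT(&amp->a_rwlock);
 *
 * Here only the amp rwlock is taken around the loop below; the
 * per-page anon_array_enter() happens inside segvn_faultpage().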
5421 * 5422 * If this is a copy-on-write fault and we don't already 5423 * have the anon_array_lock, acquire it to prevent the 5424 * fault routine from handling multiple copy-on-write faults 5425 * on the same "addr" in the same address space. 5426 * 5427 * Only one thread should deal with the fault since after 5428 * it is handled, the other threads can acquire a translation 5429 * to the newly created private page. This prevents two or 5430 * more threads from creating different private pages for the 5431 * same fault. 5432 * 5433 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5434 * to prevent deadlock between this thread and another thread 5435 * which has soft-locked this page and wants to acquire serial_lock. 5436 * ( bug 4026339 ) 5437 * 5438 * The fix for bug 4026339 becomes unnecessary when using the 5439 * locking scheme with per amp rwlock and a global set of hash 5440 * lock, anon_array_lock. If we steal a vnode page when low 5441 * on memory and upgrad the page lock through page_rename, 5442 * then the page is PAGE_HANDLED, nothing needs to be done 5443 * for this page after returning from segvn_faultpage. 5444 * 5445 * But really, the page lock should be downgraded after 5446 * the stolen page is page_rename'd. 5447 */ 5448 5449 if (amp != NULL) 5450 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5451 5452 /* 5453 * Ok, now loop over the address range and handle faults 5454 */ 5455 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5456 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5457 type, rw, brkcow, a == addr); 5458 if (err) { 5459 if (amp != NULL) 5460 ANON_LOCK_EXIT(&->a_rwlock); 5461 if (type == F_SOFTLOCK && a > addr) { 5462 segvn_softunlock(seg, addr, (a - addr), 5463 S_OTHER); 5464 } 5465 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5466 segvn_pagelist_rele(plp); 5467 if (pl_alloc_sz) 5468 kmem_free(plp, pl_alloc_sz); 5469 return (err); 5470 } 5471 if (vpage) { 5472 vpage++; 5473 } else if (svd->vpage) { 5474 page = seg_page(seg, addr); 5475 vpage = &svd->vpage[++page]; 5476 } 5477 } 5478 5479 /* Didn't get pages from the underlying fs so we're done */ 5480 if (!dogetpage) 5481 goto done; 5482 5483 /* 5484 * Now handle any other pages in the list returned. 5485 * If the page can be used, load up the translations now. 5486 * Note that the for loop will only be entered if "plp" 5487 * is pointing to a non-NULL page pointer which means that 5488 * VOP_GETPAGE() was called and vpprot has been initialized. 5489 */ 5490 if (svd->pageprot == 0) 5491 prot = svd->prot & vpprot; 5492 5493 5494 /* 5495 * Large Files: diff should be unsigned value because we started 5496 * supporting > 2GB segment sizes from 2.5.1 and when a 5497 * large file of size > 2GB gets mapped to address space 5498 * the diff value can be > 2GB. 5499 */ 5500 5501 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5502 size_t diff; 5503 struct anon *ap; 5504 int anon_index; 5505 anon_sync_obj_t cookie; 5506 int hat_flag = HAT_LOAD_ADV; 5507 5508 if (svd->flags & MAP_TEXT) { 5509 hat_flag |= HAT_LOAD_TEXT; 5510 } 5511 5512 if (pp == PAGE_HANDLED) 5513 continue; 5514 5515 if (svd->tr_state != SEGVN_TR_ON && 5516 pp->p_offset >= svd->offset && 5517 pp->p_offset < svd->offset + seg->s_size) { 5518 5519 diff = pp->p_offset - svd->offset; 5520 5521 /* 5522 * Large Files: Following is the assertion 5523 * validating the above cast. 
5524 */ 5525 ASSERT(svd->vp == pp->p_vnode); 5526 5527 page = btop(diff); 5528 if (svd->pageprot) 5529 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5530 5531 /* 5532 * Prevent other threads in the address space from 5533 * creating private pages (i.e., allocating anon slots) 5534 * while we are in the process of loading translations 5535 * to additional pages returned by the underlying 5536 * object. 5537 */ 5538 if (amp != NULL) { 5539 anon_index = svd->anon_index + page; 5540 anon_array_enter(amp, anon_index, &cookie); 5541 ap = anon_get_ptr(amp->ahp, anon_index); 5542 } 5543 if ((amp == NULL) || (ap == NULL)) { 5544 if (IS_VMODSORT(pp->p_vnode) || 5545 enable_mbit_wa) { 5546 if (rw == S_WRITE) 5547 hat_setmod(pp); 5548 else if (rw != S_OTHER && 5549 !hat_ismod(pp)) 5550 prot &= ~PROT_WRITE; 5551 } 5552 /* 5553 * Skip mapping read ahead pages marked 5554 * for migration, so they will get migrated 5555 * properly on fault 5556 */ 5557 ASSERT(amp == NULL || 5558 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5559 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5560 hat_memload_region(hat, 5561 seg->s_base + diff, 5562 pp, prot, hat_flag, 5563 svd->rcookie); 5564 } 5565 } 5566 if (amp != NULL) 5567 anon_array_exit(&cookie); 5568 } 5569 page_unlock(pp); 5570 } 5571 done: 5572 if (amp != NULL) 5573 ANON_LOCK_EXIT(&->a_rwlock); 5574 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5575 if (pl_alloc_sz) 5576 kmem_free(plp, pl_alloc_sz); 5577 return (0); 5578 } 5579 5580 /* 5581 * This routine is used to start I/O on pages asynchronously. XXX it will 5582 * only create PAGESIZE pages. At fault time they will be relocated into 5583 * larger pages. 5584 */ 5585 static faultcode_t 5586 segvn_faulta(struct seg *seg, caddr_t addr) 5587 { 5588 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5589 int err; 5590 struct anon_map *amp; 5591 vnode_t *vp; 5592 5593 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5594 5595 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5596 if ((amp = svd->amp) != NULL) { 5597 struct anon *ap; 5598 5599 /* 5600 * Reader lock to prevent amp->ahp from being changed. 5601 * This is advisory, it's ok to miss a page, so 5602 * we don't do anon_array_enter lock. 
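 *
 * (Usage sketch, assumed caller: an advisor that wants read-ahead
 * over a range simply issues
 *
 *	(void) as_faulta(seg->s_as, addr, len);
 *
 * and each page of the range ends up here via SEGOP_FAULTA; since
 * the work is purely advisory, losing a race for a slot only costs
 * a missed prefetch, never correctness.)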
5603 */ 5604 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5605 if ((ap = anon_get_ptr(amp->ahp, 5606 svd->anon_index + seg_page(seg, addr))) != NULL) { 5607 5608 err = anon_getpage(&ap, NULL, NULL, 5609 0, seg, addr, S_READ, svd->cred); 5610 5611 ANON_LOCK_EXIT(&->a_rwlock); 5612 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5613 if (err) 5614 return (FC_MAKE_ERR(err)); 5615 return (0); 5616 } 5617 ANON_LOCK_EXIT(&->a_rwlock); 5618 } 5619 5620 if (svd->vp == NULL) { 5621 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5622 return (0); /* zfod page - do nothing now */ 5623 } 5624 5625 vp = svd->vp; 5626 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5627 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5628 err = VOP_GETPAGE(vp, 5629 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5630 PAGESIZE, NULL, NULL, 0, seg, addr, 5631 S_OTHER, svd->cred); 5632 5633 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5634 if (err) 5635 return (FC_MAKE_ERR(err)); 5636 return (0); 5637 } 5638 5639 static int 5640 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5641 { 5642 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5643 struct vpage *svp, *evp; 5644 struct vnode *vp; 5645 size_t pgsz; 5646 pgcnt_t pgcnt; 5647 anon_sync_obj_t cookie; 5648 int unload_done = 0; 5649 5650 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5651 5652 if ((svd->maxprot & prot) != prot) 5653 return (EACCES); /* violated maxprot */ 5654 5655 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5656 5657 /* return if prot is the same */ 5658 if (!svd->pageprot && svd->prot == prot) { 5659 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5660 return (0); 5661 } 5662 5663 /* 5664 * Since we change protections we first have to flush the cache. 5665 * This makes sure all the pagelock calls have to recheck 5666 * protections. 5667 */ 5668 if (svd->softlockcnt > 0) { 5669 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5670 /* 5671 * Since we do have the segvn writers lock nobody can fill 5672 * the cache with entries belonging to this seg during 5673 * the purge. The flush either succeeds or we still have 5674 * pending I/Os. 5675 */ 5676 segvn_purge(seg); 5677 if (svd->softlockcnt > 0) { 5678 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5679 return (EAGAIN); 5680 } 5681 } 5682 5683 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5684 ASSERT(svd->amp == NULL); 5685 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5686 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5687 HAT_REGION_TEXT); 5688 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5689 unload_done = 1; 5690 } else if (svd->tr_state == SEGVN_TR_INIT) { 5691 svd->tr_state = SEGVN_TR_OFF; 5692 } else if (svd->tr_state == SEGVN_TR_ON) { 5693 ASSERT(svd->amp != NULL); 5694 segvn_textunrepl(seg, 0); 5695 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5696 unload_done = 1; 5697 } 5698 5699 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5700 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5701 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5702 segvn_inval_trcache(svd->vp); 5703 } 5704 if (seg->s_szc != 0) { 5705 int err; 5706 pgsz = page_get_pagesize(seg->s_szc); 5707 pgcnt = pgsz >> PAGESHIFT; 5708 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5709 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5710 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5711 ASSERT(seg->s_base != addr || seg->s_size != len); 5712 /* 5713 * If we are holding the as lock as a reader then 5714 * we need to return IE_RETRY and let the as 5715 * layer drop and re-aquire the lock as a writer. 
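 *
 * Sketch of the retry protocol from the caller's point of view (an
 * assumed simplification of the as layer, not code from this file):
 *
 *	lock the as as reader;
 *	err = SEGOP_SETPROT(seg, addr, len, prot);
 *	unlock;
 *	if (err == IE_RETRY)
 *		repeat the call, this time holding the as lock as
 *		writer so the segment can demote itself first;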
5716 */ 5717 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5718 return (IE_RETRY); 5719 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5720 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5721 err = segvn_demote_range(seg, addr, len, 5722 SDR_END, 0); 5723 } else { 5724 uint_t szcvec = map_pgszcvec(seg->s_base, 5725 pgsz, (uintptr_t)seg->s_base, 5726 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5727 err = segvn_demote_range(seg, addr, len, 5728 SDR_END, szcvec); 5729 } 5730 if (err == 0) 5731 return (IE_RETRY); 5732 if (err == ENOMEM) 5733 return (IE_NOMEM); 5734 return (err); 5735 } 5736 } 5737 5738 5739 /* 5740 * If it's a private mapping and we're making it writable 5741 * and no swap space has been reserved, have to reserve 5742 * it all now. If it's a private mapping to a file (i.e., vp != NULL) 5743 * and we're removing write permission on the entire segment and 5744 * we haven't modified any pages, we can release the swap space. 5745 */ 5746 if (svd->type == MAP_PRIVATE) { 5747 if (prot & PROT_WRITE) { 5748 size_t sz; 5749 if (svd->swresv == 0 && !(svd->flags & MAP_NORESERVE)) { 5750 if (anon_resv_zone(seg->s_size, 5751 seg->s_as->a_proc->p_zone) == 0) { 5752 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5753 return (IE_NOMEM); 5754 } 5755 sz = svd->swresv = seg->s_size; 5756 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5757 "anon proc:%p %lu %u", 5758 seg, sz, 1); 5759 } 5760 } else { 5761 /* 5762 * Swap space is released only if this segment 5763 * does not map anonymous memory, since read faults 5764 * on such segments still need an anon slot to read 5765 * in the data. 5766 */ 5767 if (svd->swresv != 0 && svd->vp != NULL && 5768 svd->amp == NULL && addr == seg->s_base && 5769 len == seg->s_size && svd->pageprot == 0) { 5770 anon_unresv_zone(svd->swresv, 5771 seg->s_as->a_proc->p_zone); 5772 svd->swresv = 0; 5773 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5774 "anon proc:%p %lu %u", 5775 seg, 0, 0); 5776 } 5777 } 5778 } 5779 5780 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5781 if (svd->prot == prot) { 5782 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5783 return (0); /* all done */ 5784 } 5785 svd->prot = (uchar_t)prot; 5786 } else if (svd->type == MAP_PRIVATE) { 5787 struct anon *ap = NULL; 5788 page_t *pp; 5789 u_offset_t offset, off; 5790 struct anon_map *amp; 5791 ulong_t anon_idx = 0; 5792 5793 /* 5794 * A vpage structure exists or else the change does not 5795 * involve the entire segment. Establish a vpage structure 5796 * if none is there. Then, for each page in the range, 5797 * adjust its individual permissions. Note that write- 5798 * enabling a MAP_PRIVATE page can affect the claims for 5799 * locked down memory. Overcommitting memory terminates 5800 * the operation. 5801 */ 5802 segvn_vpage(seg); 5803 svd->pageprot = 1; 5804 if ((amp = svd->amp) != NULL) { 5805 anon_idx = svd->anon_index + seg_page(seg, addr); 5806 ASSERT(seg->s_szc == 0 || 5807 IS_P2ALIGNED(anon_idx, pgcnt)); 5808 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5809 } 5810 5811 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5812 evp = &svd->vpage[seg_page(seg, addr + len)]; 5813 5814 /* 5815 * See Statement at the beginning of segvn_lockop regarding 5816 * the way cowcnts and lckcnts are handled. 
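 *
 * In outline (illustrative only): for a small-page segment the loop
 * below does, for each locked-down page (VPP_ISPPLOCK) whose
 * PROT_WRITE bit is changing,
 *
 *	granting write	-> page_addclaim(pp)
 *	revoking write	-> page_subclaim(pp)
 *
 * while large-page segments go through segvn_claim_pages(); a
 * failed claim terminates the loop early and is reported as
 * IE_NOMEM (overcommit) at the end of this routine.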
5817 */ 5818 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5819 5820 if (seg->s_szc != 0) { 5821 if (amp != NULL) { 5822 anon_array_enter(amp, anon_idx, 5823 &cookie); 5824 } 5825 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5826 !segvn_claim_pages(seg, svp, offset, 5827 anon_idx, prot)) { 5828 if (amp != NULL) { 5829 anon_array_exit(&cookie); 5830 } 5831 break; 5832 } 5833 if (amp != NULL) { 5834 anon_array_exit(&cookie); 5835 } 5836 anon_idx++; 5837 } else { 5838 if (amp != NULL) { 5839 anon_array_enter(amp, anon_idx, 5840 &cookie); 5841 ap = anon_get_ptr(amp->ahp, anon_idx++); 5842 } 5843 5844 if (VPP_ISPPLOCK(svp) && 5845 VPP_PROT(svp) != prot) { 5846 5847 if (amp == NULL || ap == NULL) { 5848 vp = svd->vp; 5849 off = offset; 5850 } else 5851 swap_xlate(ap, &vp, &off); 5852 if (amp != NULL) 5853 anon_array_exit(&cookie); 5854 5855 if ((pp = page_lookup(vp, off, 5856 SE_SHARED)) == NULL) { 5857 panic("segvn_setprot: no page"); 5858 /*NOTREACHED*/ 5859 } 5860 ASSERT(seg->s_szc == 0); 5861 if ((VPP_PROT(svp) ^ prot) & 5862 PROT_WRITE) { 5863 if (prot & PROT_WRITE) { 5864 if (!page_addclaim(pp)) { 5865 page_unlock(pp); 5866 break; 5867 } 5868 } else { 5869 if (!page_subclaim(pp)) { 5870 page_unlock(pp); 5871 break; 5872 } 5873 } 5874 } 5875 page_unlock(pp); 5876 } else if (amp != NULL) 5877 anon_array_exit(&cookie); 5878 } 5879 VPP_SETPROT(svp, prot); 5880 offset += PAGESIZE; 5881 } 5882 if (amp != NULL) 5883 ANON_LOCK_EXIT(&->a_rwlock); 5884 5885 /* 5886 * Did we terminate prematurely? If so, simply unload 5887 * the translations to the things we've updated so far. 5888 */ 5889 if (svp != evp) { 5890 if (unload_done) { 5891 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5892 return (IE_NOMEM); 5893 } 5894 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5895 PAGESIZE; 5896 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5897 if (len != 0) 5898 hat_unload(seg->s_as->a_hat, addr, 5899 len, HAT_UNLOAD); 5900 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5901 return (IE_NOMEM); 5902 } 5903 } else { 5904 segvn_vpage(seg); 5905 svd->pageprot = 1; 5906 evp = &svd->vpage[seg_page(seg, addr + len)]; 5907 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5908 VPP_SETPROT(svp, prot); 5909 } 5910 } 5911 5912 if (unload_done) { 5913 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5914 return (0); 5915 } 5916 5917 if (((prot & PROT_WRITE) != 0 && 5918 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 5919 (prot & ~PROT_USER) == PROT_NONE) { 5920 /* 5921 * Either private or shared data with write access (in 5922 * which case we need to throw out all former translations 5923 * so that we get the right translations set up on fault 5924 * and we don't allow write access to any copy-on-write pages 5925 * that might be around or to prevent write access to pages 5926 * representing holes in a file), or we don't have permission 5927 * to access the memory at all (in which case we have to 5928 * unload any current translations that might exist). 5929 */ 5930 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 5931 } else { 5932 /* 5933 * A shared mapping or a private mapping in which write 5934 * protection is going to be denied - just change all the 5935 * protections over the range of addresses in question. 5936 * segvn does not support any other attributes other 5937 * than prot so we can use hat_chgattr. 
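 *
 * Condensed view of the two branches (an illustration, not new
 * logic):
 *
 *	write access being granted to a private or vnode-backed
 *	mapping, or all access revoked
 *		-> hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD)
 *		   (stale translations must be refaulted)
 *	otherwise, e.g. write access being taken away
 *		-> hat_chgattr(seg->s_as->a_hat, addr, len, prot)
 *		   (existing translations are kept, just downgraded)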
5938 */ 5939 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 5940 } 5941 5942 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5943 5944 return (0); 5945 } 5946 5947 /* 5948 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 5949 * to determine if the seg is capable of mapping the requested szc. 5950 */ 5951 static int 5952 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 5953 { 5954 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5955 struct segvn_data *nsvd; 5956 struct anon_map *amp = svd->amp; 5957 struct seg *nseg; 5958 caddr_t eaddr = addr + len, a; 5959 size_t pgsz = page_get_pagesize(szc); 5960 pgcnt_t pgcnt = page_get_pagecnt(szc); 5961 int err; 5962 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 5963 extern struct vnode kvp; 5964 5965 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5966 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 5967 5968 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 5969 return (0); 5970 } 5971 5972 /* 5973 * addr should always be pgsz aligned but eaddr may be misaligned if 5974 * it's at the end of the segment. 5975 * 5976 * XXX we should assert this condition since as_setpagesize() logic 5977 * guarantees it. 5978 */ 5979 if (!IS_P2ALIGNED(addr, pgsz) || 5980 (!IS_P2ALIGNED(eaddr, pgsz) && 5981 eaddr != seg->s_base + seg->s_size)) { 5982 5983 segvn_setpgsz_align_err++; 5984 return (EINVAL); 5985 } 5986 5987 if (amp != NULL && svd->type == MAP_SHARED) { 5988 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 5989 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 5990 5991 segvn_setpgsz_anon_align_err++; 5992 return (EINVAL); 5993 } 5994 } 5995 5996 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 5997 szc > segvn_maxpgszc) { 5998 return (EINVAL); 5999 } 6000 6001 /* paranoid check */ 6002 if (svd->vp != NULL && 6003 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6004 return (EINVAL); 6005 } 6006 6007 if (seg->s_szc == 0 && svd->vp != NULL && 6008 map_addr_vacalign_check(addr, off)) { 6009 return (EINVAL); 6010 } 6011 6012 /* 6013 * Check that protections are the same within new page 6014 * size boundaries. 6015 */ 6016 if (svd->pageprot) { 6017 for (a = addr; a < eaddr; a += pgsz) { 6018 if ((a + pgsz) > eaddr) { 6019 if (!sameprot(seg, a, eaddr - a)) { 6020 return (EINVAL); 6021 } 6022 } else { 6023 if (!sameprot(seg, a, pgsz)) { 6024 return (EINVAL); 6025 } 6026 } 6027 } 6028 } 6029 6030 /* 6031 * Since we are changing page size we first have to flush 6032 * the cache. This makes sure all the pagelock calls have 6033 * to recheck protections. 6034 */ 6035 if (svd->softlockcnt > 0) { 6036 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6037 /* 6038 * Since we do have the segvn writers lock nobody can fill 6039 * the cache with entries belonging to this seg during 6040 * the purge. The flush either succeeds or we still have 6041 * pending I/Os. 
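 * In the latter case segvn_setpagesize() fails with EAGAIN below and the
 * caller may retry once the outstanding pagelocks are dropped.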
6042 */ 6043 segvn_purge(seg); 6044 if (svd->softlockcnt > 0) { 6045 return (EAGAIN); 6046 } 6047 } 6048 6049 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6050 ASSERT(svd->amp == NULL); 6051 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6052 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6053 HAT_REGION_TEXT); 6054 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6055 } else if (svd->tr_state == SEGVN_TR_INIT) { 6056 svd->tr_state = SEGVN_TR_OFF; 6057 } else if (svd->tr_state == SEGVN_TR_ON) { 6058 ASSERT(svd->amp != NULL); 6059 segvn_textunrepl(seg, 1); 6060 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6061 amp = NULL; 6062 } 6063 6064 /* 6065 * Operation for sub range of existing segment. 6066 */ 6067 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6068 if (szc < seg->s_szc) { 6069 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6070 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6071 if (err == 0) { 6072 return (IE_RETRY); 6073 } 6074 if (err == ENOMEM) { 6075 return (IE_NOMEM); 6076 } 6077 return (err); 6078 } 6079 if (addr != seg->s_base) { 6080 nseg = segvn_split_seg(seg, addr); 6081 if (eaddr != (nseg->s_base + nseg->s_size)) { 6082 /* eaddr is szc aligned */ 6083 (void) segvn_split_seg(nseg, eaddr); 6084 } 6085 return (IE_RETRY); 6086 } 6087 if (eaddr != (seg->s_base + seg->s_size)) { 6088 /* eaddr is szc aligned */ 6089 (void) segvn_split_seg(seg, eaddr); 6090 } 6091 return (IE_RETRY); 6092 } 6093 6094 /* 6095 * Break any low level sharing and reset seg->s_szc to 0. 6096 */ 6097 if ((err = segvn_clrszc(seg)) != 0) { 6098 if (err == ENOMEM) { 6099 err = IE_NOMEM; 6100 } 6101 return (err); 6102 } 6103 ASSERT(seg->s_szc == 0); 6104 6105 /* 6106 * If the end of the current segment is not pgsz aligned 6107 * then attempt to concatenate with the next segment. 6108 */ 6109 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6110 nseg = AS_SEGNEXT(seg->s_as, seg); 6111 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6112 return (ENOMEM); 6113 } 6114 if (nseg->s_ops != &segvn_ops) { 6115 return (EINVAL); 6116 } 6117 nsvd = (struct segvn_data *)nseg->s_data; 6118 if (nsvd->softlockcnt > 0) { 6119 segvn_purge(nseg); 6120 if (nsvd->softlockcnt > 0) { 6121 return (EAGAIN); 6122 } 6123 } 6124 err = segvn_clrszc(nseg); 6125 if (err == ENOMEM) { 6126 err = IE_NOMEM; 6127 } 6128 if (err != 0) { 6129 return (err); 6130 } 6131 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6132 err = segvn_concat(seg, nseg, 1); 6133 if (err == -1) { 6134 return (EINVAL); 6135 } 6136 if (err == -2) { 6137 return (IE_NOMEM); 6138 } 6139 return (IE_RETRY); 6140 } 6141 6142 /* 6143 * May need to re-align anon array to 6144 * new szc. 
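 * For instance, if svd->anon_index is not a multiple of the new page count
 * (pgcnt), a fresh anon_hdr is allocated below and the existing slots are
 * copied across so that the segment's anon range starts at index 0.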
	 */
	if (amp != NULL) {
		if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
			struct anon_hdr *nahp;

			ASSERT(svd->type == MAP_PRIVATE);

			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			ASSERT(amp->refcnt == 1);
			nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
			if (nahp == NULL) {
				ANON_LOCK_EXIT(&amp->a_rwlock);
				return (IE_NOMEM);
			}
			if (anon_copy_ptr(amp->ahp, svd->anon_index,
			    nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(amp->size));
				ANON_LOCK_EXIT(&amp->a_rwlock);
				return (IE_NOMEM);
			}
			anon_release(amp->ahp, btop(amp->size));
			amp->ahp = nahp;
			svd->anon_index = 0;
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}
	if (svd->vp != NULL && szc != 0) {
		struct vattr va;
		u_offset_t eoffpage = svd->offset;
		va.va_mask = AT_SIZE;
		eoffpage += seg->s_size;
		eoffpage = btopr(eoffpage);
		if (VOP_GETATTR(svd->vp, &va, 0, svd->cred) != 0) {
			segvn_setpgsz_getattr_err++;
			return (EINVAL);
		}
		if (btopr(va.va_size) < eoffpage) {
			segvn_setpgsz_eof_err++;
			return (EINVAL);
		}
		if (amp != NULL) {
			/*
			 * anon_fill_cow_holes() may call VOP_GETPAGE().
			 * don't take anon map lock here to avoid holding it
			 * across VOP_GETPAGE() calls that may call back into
			 * segvn for klustering checks. We don't really need
			 * anon map lock here since it's a private segment and
			 * we hold as level lock as writers.
			 */
			if ((err = anon_fill_cow_holes(seg, seg->s_base,
			    amp->ahp, svd->anon_index, svd->vp, svd->offset,
			    seg->s_size, szc, svd->prot, svd->vpage,
			    svd->cred)) != 0) {
				return (EINVAL);
			}
		}
		segvn_setvnode_mpss(svd->vp);
	}

	if (amp != NULL) {
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		if (svd->type == MAP_PRIVATE) {
			amp->a_szc = szc;
		} else if (szc > amp->a_szc) {
			amp->a_szc = szc;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
	}

	seg->s_szc = szc;

	return (0);
}

static int
segvn_clrszc(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp = svd->amp;
	size_t pgsz;
	pgcnt_t pages;
	int err = 0;
	caddr_t a = seg->s_base;
	caddr_t ea = a + seg->s_size;
	ulong_t an_idx = svd->anon_index;
	vnode_t *vp = svd->vp;
	struct vpage *vpage = svd->vpage;
	page_t *anon_pl[1 + 1], *pp;
	struct anon *ap, *oldap;
	uint_t prot = svd->prot, vpprot;
	int pageflag = 0;

	ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->softlockcnt == 0);

	if (vp == NULL && amp == NULL) {
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		seg->s_szc = 0;
		return (0);
	}

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->amp == NULL);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		hat_leave_region(seg->s_as->a_hat, svd->rcookie,
		    HAT_REGION_TEXT);
		svd->rcookie = HAT_INVALID_REGION_COOKIE;
	} else if (svd->tr_state == SEGVN_TR_ON) {
		ASSERT(svd->amp != NULL);
		segvn_textunrepl(seg, 1);
		ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
		amp = NULL;
	} else {
		if (svd->tr_state != SEGVN_TR_OFF) {
			ASSERT(svd->tr_state == SEGVN_TR_INIT);
			svd->tr_state = SEGVN_TR_OFF;
		}

		/*
		 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6266 * unload argument is 0 when we are freeing the segment 6267 * and unload was already done. 6268 */ 6269 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6270 HAT_UNLOAD_UNMAP); 6271 } 6272 6273 if (amp == NULL || svd->type == MAP_SHARED) { 6274 seg->s_szc = 0; 6275 return (0); 6276 } 6277 6278 pgsz = page_get_pagesize(seg->s_szc); 6279 pages = btop(pgsz); 6280 6281 /* 6282 * XXX anon rwlock is not really needed because this is a 6283 * private segment and we are writers. 6284 */ 6285 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6286 6287 for (; a < ea; a += pgsz, an_idx += pages) { 6288 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6289 ASSERT(vpage != NULL || svd->pageprot == 0); 6290 if (vpage != NULL) { 6291 ASSERT(sameprot(seg, a, pgsz)); 6292 prot = VPP_PROT(vpage); 6293 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6294 } 6295 if (seg->s_szc != 0) { 6296 ASSERT(vp == NULL || anon_pages(amp->ahp, 6297 an_idx, pages) == pages); 6298 if ((err = anon_map_demotepages(amp, an_idx, 6299 seg, a, prot, vpage, svd->cred)) != 0) { 6300 goto out; 6301 } 6302 } else { 6303 if (oldap->an_refcnt == 1) { 6304 continue; 6305 } 6306 if ((err = anon_getpage(&oldap, &vpprot, 6307 anon_pl, PAGESIZE, seg, a, S_READ, 6308 svd->cred))) { 6309 goto out; 6310 } 6311 if ((pp = anon_private(&ap, seg, a, prot, 6312 anon_pl[0], pageflag, svd->cred)) == NULL) { 6313 err = ENOMEM; 6314 goto out; 6315 } 6316 anon_decref(oldap); 6317 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6318 ANON_SLEEP); 6319 page_unlock(pp); 6320 } 6321 } 6322 vpage = (vpage == NULL) ? NULL : vpage + pages; 6323 } 6324 6325 amp->a_szc = 0; 6326 seg->s_szc = 0; 6327 out: 6328 ANON_LOCK_EXIT(&->a_rwlock); 6329 return (err); 6330 } 6331 6332 static int 6333 segvn_claim_pages( 6334 struct seg *seg, 6335 struct vpage *svp, 6336 u_offset_t off, 6337 ulong_t anon_idx, 6338 uint_t prot) 6339 { 6340 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6341 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6342 page_t **ppa; 6343 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6344 struct anon_map *amp = svd->amp; 6345 struct vpage *evp = svp + pgcnt; 6346 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6347 + seg->s_base; 6348 struct anon *ap; 6349 struct vnode *vp = svd->vp; 6350 page_t *pp; 6351 pgcnt_t pg_idx, i; 6352 int err = 0; 6353 anoff_t aoff; 6354 int anon = (amp != NULL) ? 
1 : 0;

	ASSERT(svd->type == MAP_PRIVATE);
	ASSERT(svd->vpage != NULL);
	ASSERT(seg->s_szc != 0);
	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
	ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
	ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));

	if (VPP_PROT(svp) == prot)
		return (1);
	if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
		return (1);

	ppa = kmem_alloc(ppasize, KM_SLEEP);
	if (anon && vp != NULL) {
		if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
			anon = 0;
			ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
		}
		ASSERT(!anon ||
		    anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
	}

	for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
		if (!VPP_ISPPLOCK(svp))
			continue;
		if (anon) {
			ap = anon_get_ptr(amp->ahp, anon_idx);
			if (ap == NULL) {
				panic("segvn_claim_pages: no anon slot");
			}
			swap_xlate(ap, &vp, &aoff);
			off = (u_offset_t)aoff;
		}
		ASSERT(vp != NULL);
		if ((pp = page_lookup(vp,
		    (u_offset_t)off, SE_SHARED)) == NULL) {
			panic("segvn_claim_pages: no page");
		}
		ppa[pg_idx++] = pp;
		off += PAGESIZE;
	}

	if (ppa[0] == NULL) {
		kmem_free(ppa, ppasize);
		return (1);
	}

	ASSERT(pg_idx <= pgcnt);
	ppa[pg_idx] = NULL;

	if (prot & PROT_WRITE)
		err = page_addclaim_pages(ppa);
	else
		err = page_subclaim_pages(ppa);

	for (i = 0; i < pg_idx; i++) {
		ASSERT(ppa[i] != NULL);
		page_unlock(ppa[i]);
	}

	kmem_free(ppa, ppasize);
	return (err);
}

/*
 * Returns right (upper address) segment if split occurred.
 * If the address is equal to the beginning or end of its segment it returns
 * the current segment.
 */
static struct seg *
segvn_split_seg(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct seg *nseg;
	size_t nsize;
	struct segvn_data *nsvd;

	ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(addr >= seg->s_base);
	ASSERT(addr <= seg->s_base + seg->s_size);
	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

	if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
		return (seg);

	nsize = seg->s_base + seg->s_size - addr;
	seg->s_size = addr - seg->s_base;
	nseg = seg_alloc(seg->s_as, addr, nsize);
	ASSERT(nseg != NULL);
	nseg->s_ops = seg->s_ops;
	nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
	nseg->s_data = (void *)nsvd;
	nseg->s_szc = seg->s_szc;
	*nsvd = *svd;
	ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
	nsvd->seg = nseg;
	rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);

	if (nsvd->vp != NULL) {
		VN_HOLD(nsvd->vp);
		nsvd->offset = svd->offset +
		    (uintptr_t)(nseg->s_base - seg->s_base);
		if (nsvd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, nsvd->vp);
	} else {
		/*
		 * The offset for an anonymous segment has no significance in
		 * terms of an offset into a file.
If we were to use the above 6466 * calculation instead, the structures read out of 6467 * /proc/<pid>/xmap would be more difficult to decipher since 6468 * it would be unclear whether two seemingly contiguous 6469 * prxmap_t structures represented different segments or a 6470 * single segment that had been split up into multiple prxmap_t 6471 * structures (e.g. if some part of the segment had not yet 6472 * been faulted in). 6473 */ 6474 nsvd->offset = 0; 6475 } 6476 6477 ASSERT(svd->softlockcnt == 0); 6478 crhold(svd->cred); 6479 6480 if (svd->vpage != NULL) { 6481 size_t bytes = vpgtob(seg_pages(seg)); 6482 size_t nbytes = vpgtob(seg_pages(nseg)); 6483 struct vpage *ovpage = svd->vpage; 6484 6485 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6486 bcopy(ovpage, svd->vpage, bytes); 6487 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6488 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6489 kmem_free(ovpage, bytes + nbytes); 6490 } 6491 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6492 struct anon_map *oamp = svd->amp, *namp; 6493 struct anon_hdr *nahp; 6494 6495 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6496 ASSERT(oamp->refcnt == 1); 6497 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6498 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6499 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6500 6501 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6502 namp->a_szc = nseg->s_szc; 6503 (void) anon_copy_ptr(oamp->ahp, 6504 svd->anon_index + btop(seg->s_size), 6505 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6506 anon_release(oamp->ahp, btop(oamp->size)); 6507 oamp->ahp = nahp; 6508 oamp->size = seg->s_size; 6509 svd->anon_index = 0; 6510 nsvd->amp = namp; 6511 nsvd->anon_index = 0; 6512 ANON_LOCK_EXIT(&oamp->a_rwlock); 6513 } else if (svd->amp != NULL) { 6514 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6515 ASSERT(svd->amp == nsvd->amp); 6516 ASSERT(seg->s_szc <= svd->amp->a_szc); 6517 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6518 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6519 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6520 svd->amp->refcnt++; 6521 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6522 } 6523 6524 /* 6525 * Split amount of swap reserve 6526 */ 6527 if (svd->swresv) { 6528 /* 6529 * For MAP_NORESERVE, only allocate swap reserve for pages 6530 * being used. Other segments get enough to cover whole 6531 * segment. 6532 */ 6533 if (svd->flags & MAP_NORESERVE) { 6534 size_t oswresv; 6535 6536 ASSERT(svd->amp); 6537 oswresv = svd->swresv; 6538 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6539 svd->anon_index, btop(seg->s_size))); 6540 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6541 nsvd->anon_index, btop(nseg->s_size))); 6542 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6543 } else { 6544 ASSERT(svd->swresv == seg->s_size + nseg->s_size); 6545 svd->swresv = seg->s_size; 6546 nsvd->swresv = nseg->s_size; 6547 } 6548 } 6549 6550 return (nseg); 6551 } 6552 6553 /* 6554 * called on memory operations (unmap, setprot, setpagesize) for a subset 6555 * of a large page segment to either demote the memory range (SDR_RANGE) 6556 * or the ends (SDR_END) by addr/len. 6557 * 6558 * returns 0 on success. returns errno, including ENOMEM, on failure. 
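 *
 * SDR_RANGE demotes every large page overlapping [addr, addr + len);
 * SDR_END demotes only the (at most two) large pages containing the
 * unaligned ends of the range.  For MAP_SHARED segments a nonzero szcvec
 * lets those end segments be given a smaller supported page size (tszc)
 * instead of dropping all the way to PAGESIZE.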
6559 */ 6560 static int 6561 segvn_demote_range( 6562 struct seg *seg, 6563 caddr_t addr, 6564 size_t len, 6565 int flag, 6566 uint_t szcvec) 6567 { 6568 caddr_t eaddr = addr + len; 6569 caddr_t lpgaddr, lpgeaddr; 6570 struct seg *nseg; 6571 struct seg *badseg1 = NULL; 6572 struct seg *badseg2 = NULL; 6573 size_t pgsz; 6574 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6575 int err; 6576 uint_t szc = seg->s_szc; 6577 uint_t tszcvec; 6578 6579 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6580 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6581 ASSERT(szc != 0); 6582 pgsz = page_get_pagesize(szc); 6583 ASSERT(seg->s_base != addr || seg->s_size != len); 6584 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6585 ASSERT(svd->softlockcnt == 0); 6586 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6587 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6588 6589 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6590 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6591 if (flag == SDR_RANGE) { 6592 /* demote entire range */ 6593 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6594 (void) segvn_split_seg(nseg, lpgeaddr); 6595 ASSERT(badseg1->s_base == lpgaddr); 6596 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6597 } else if (addr != lpgaddr) { 6598 ASSERT(flag == SDR_END); 6599 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6600 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6601 eaddr < lpgaddr + 2 * pgsz) { 6602 (void) segvn_split_seg(nseg, lpgeaddr); 6603 ASSERT(badseg1->s_base == lpgaddr); 6604 ASSERT(badseg1->s_size == 2 * pgsz); 6605 } else { 6606 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6607 ASSERT(badseg1->s_base == lpgaddr); 6608 ASSERT(badseg1->s_size == pgsz); 6609 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6610 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6611 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6612 badseg2 = nseg; 6613 (void) segvn_split_seg(nseg, lpgeaddr); 6614 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6615 ASSERT(badseg2->s_size == pgsz); 6616 } 6617 } 6618 } else { 6619 ASSERT(flag == SDR_END); 6620 ASSERT(eaddr < lpgeaddr); 6621 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6622 (void) segvn_split_seg(nseg, lpgeaddr); 6623 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6624 ASSERT(badseg1->s_size == pgsz); 6625 } 6626 6627 ASSERT(badseg1 != NULL); 6628 ASSERT(badseg1->s_szc == szc); 6629 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6630 badseg1->s_size == 2 * pgsz); 6631 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6632 ASSERT(badseg1->s_size == pgsz || 6633 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6634 if (err = segvn_clrszc(badseg1)) { 6635 return (err); 6636 } 6637 ASSERT(badseg1->s_szc == 0); 6638 6639 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6640 uint_t tszc = highbit(tszcvec) - 1; 6641 caddr_t ta = MAX(addr, badseg1->s_base); 6642 caddr_t te; 6643 size_t tpgsz = page_get_pagesize(tszc); 6644 6645 ASSERT(svd->type == MAP_SHARED); 6646 ASSERT(flag == SDR_END); 6647 ASSERT(tszc < szc && tszc > 0); 6648 6649 if (eaddr > badseg1->s_base + badseg1->s_size) { 6650 te = badseg1->s_base + badseg1->s_size; 6651 } else { 6652 te = eaddr; 6653 } 6654 6655 ASSERT(ta <= te); 6656 badseg1->s_szc = tszc; 6657 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6658 if (badseg2 != NULL) { 6659 err = segvn_demote_range(badseg1, ta, te - ta, 6660 SDR_END, tszcvec); 6661 if (err != 0) { 6662 return (err); 6663 } 6664 } else { 6665 
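			/*
			 * No second end segment to process: recurse on
			 * badseg1 at the smaller tszc and propagate the
			 * result directly.
			 */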
return (segvn_demote_range(badseg1, ta, 6666 te - ta, SDR_END, tszcvec)); 6667 } 6668 } 6669 } 6670 6671 if (badseg2 == NULL) 6672 return (0); 6673 ASSERT(badseg2->s_szc == szc); 6674 ASSERT(badseg2->s_size == pgsz); 6675 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6676 if (err = segvn_clrszc(badseg2)) { 6677 return (err); 6678 } 6679 ASSERT(badseg2->s_szc == 0); 6680 6681 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6682 uint_t tszc = highbit(tszcvec) - 1; 6683 size_t tpgsz = page_get_pagesize(tszc); 6684 6685 ASSERT(svd->type == MAP_SHARED); 6686 ASSERT(flag == SDR_END); 6687 ASSERT(tszc < szc && tszc > 0); 6688 ASSERT(badseg2->s_base > addr); 6689 ASSERT(eaddr > badseg2->s_base); 6690 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6691 6692 badseg2->s_szc = tszc; 6693 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6694 return (segvn_demote_range(badseg2, badseg2->s_base, 6695 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6696 } 6697 } 6698 6699 return (0); 6700 } 6701 6702 static int 6703 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6704 { 6705 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6706 struct vpage *vp, *evp; 6707 6708 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6709 6710 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6711 /* 6712 * If segment protection can be used, simply check against them. 6713 */ 6714 if (svd->pageprot == 0) { 6715 int err; 6716 6717 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6718 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6719 return (err); 6720 } 6721 6722 /* 6723 * Have to check down to the vpage level. 6724 */ 6725 evp = &svd->vpage[seg_page(seg, addr + len)]; 6726 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6727 if ((VPP_PROT(vp) & prot) != prot) { 6728 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6729 return (EACCES); 6730 } 6731 } 6732 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6733 return (0); 6734 } 6735 6736 static int 6737 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6738 { 6739 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6740 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6741 6742 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6743 6744 if (pgno != 0) { 6745 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6746 if (svd->pageprot == 0) { 6747 do 6748 protv[--pgno] = svd->prot; 6749 while (pgno != 0); 6750 } else { 6751 size_t pgoff = seg_page(seg, addr); 6752 6753 do { 6754 pgno--; 6755 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6756 } while (pgno != 0); 6757 } 6758 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6759 } 6760 return (0); 6761 } 6762 6763 static u_offset_t 6764 segvn_getoffset(struct seg *seg, caddr_t addr) 6765 { 6766 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6767 6768 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6769 6770 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6771 } 6772 6773 /*ARGSUSED*/ 6774 static int 6775 segvn_gettype(struct seg *seg, caddr_t addr) 6776 { 6777 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6778 6779 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6780 6781 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6782 MAP_INITDATA))); 6783 } 6784 6785 /*ARGSUSED*/ 6786 static int 6787 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6788 { 6789 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6790 6791 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6792 6793 *vpp = svd->vp; 6794 return (0); 6795 } 6796 6797 /* 6798 * Check to see if it makes sense to do kluster/read ahead to 6799 * addr + delta relative to the mapping at addr. We assume here 6800 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6801 * 6802 * For segvn, we currently "approve" of the action if we are 6803 * still in the segment and it maps from the same vp/off, 6804 * or if the advice stored in segvn_data or vpages allows it. 6805 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6806 */ 6807 static int 6808 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6809 { 6810 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6811 struct anon *oap, *ap; 6812 ssize_t pd; 6813 size_t page; 6814 struct vnode *vp1, *vp2; 6815 u_offset_t off1, off2; 6816 struct anon_map *amp; 6817 6818 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6819 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6820 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6821 6822 if (addr + delta < seg->s_base || 6823 addr + delta >= (seg->s_base + seg->s_size)) 6824 return (-1); /* exceeded segment bounds */ 6825 6826 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6827 page = seg_page(seg, addr); 6828 6829 /* 6830 * Check to see if either of the pages addr or addr + delta 6831 * have advice set that prevents klustering (if MADV_RANDOM advice 6832 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6833 * is negative). 6834 */ 6835 if (svd->advice == MADV_RANDOM || 6836 svd->advice == MADV_SEQUENTIAL && delta < 0) 6837 return (-1); 6838 else if (svd->pageadvice && svd->vpage) { 6839 struct vpage *bvpp, *evpp; 6840 6841 bvpp = &svd->vpage[page]; 6842 evpp = &svd->vpage[page + pd]; 6843 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6844 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6845 return (-1); 6846 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6847 VPP_ADVICE(evpp) == MADV_RANDOM) 6848 return (-1); 6849 } 6850 6851 if (svd->type == MAP_SHARED) 6852 return (0); /* shared mapping - all ok */ 6853 6854 if ((amp = svd->amp) == NULL) 6855 return (0); /* off original vnode */ 6856 6857 page += svd->anon_index; 6858 6859 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6860 6861 oap = anon_get_ptr(amp->ahp, page); 6862 ap = anon_get_ptr(amp->ahp, page + pd); 6863 6864 ANON_LOCK_EXIT(&->a_rwlock); 6865 6866 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 6867 return (-1); /* one with and one without an anon */ 6868 } 6869 6870 if (oap == NULL) { /* implies that ap == NULL */ 6871 return (0); /* off original vnode */ 6872 } 6873 6874 /* 6875 * Now we know we have two anon pointers - check to 6876 * see if they happen to be properly allocated. 6877 */ 6878 6879 /* 6880 * XXX We cheat here and don't lock the anon slots. We can't because 6881 * we may have been called from the anon layer which might already 6882 * have locked them. We are holding a refcnt on the slots so they 6883 * can't disappear. The worst that will happen is we'll get the wrong 6884 * names (vp, off) for the slots and make a poor klustering decision. 6885 */ 6886 swap_xlate(ap, &vp1, &off1); 6887 swap_xlate(oap, &vp2, &off2); 6888 6889 6890 if (!VOP_CMP(vp1, vp2) || off1 - off2 != delta) 6891 return (-1); 6892 return (0); 6893 } 6894 6895 /* 6896 * Swap the pages of seg out to secondary storage, returning the 6897 * number of bytes of storage freed. 
6898 * 6899 * The basic idea is first to unload all translations and then to call 6900 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 6901 * swap device. Pages to which other segments have mappings will remain 6902 * mapped and won't be swapped. Our caller (as_swapout) has already 6903 * performed the unloading step. 6904 * 6905 * The value returned is intended to correlate well with the process's 6906 * memory requirements. However, there are some caveats: 6907 * 1) When given a shared segment as argument, this routine will 6908 * only succeed in swapping out pages for the last sharer of the 6909 * segment. (Previous callers will only have decremented mapping 6910 * reference counts.) 6911 * 2) We assume that the hat layer maintains a large enough translation 6912 * cache to capture process reference patterns. 6913 */ 6914 static size_t 6915 segvn_swapout(struct seg *seg) 6916 { 6917 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6918 struct anon_map *amp; 6919 pgcnt_t pgcnt = 0; 6920 pgcnt_t npages; 6921 pgcnt_t page; 6922 ulong_t anon_index; 6923 6924 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6925 6926 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6927 /* 6928 * Find pages unmapped by our caller and force them 6929 * out to the virtual swap device. 6930 */ 6931 if ((amp = svd->amp) != NULL) 6932 anon_index = svd->anon_index; 6933 npages = seg->s_size >> PAGESHIFT; 6934 for (page = 0; page < npages; page++) { 6935 page_t *pp; 6936 struct anon *ap; 6937 struct vnode *vp; 6938 u_offset_t off; 6939 anon_sync_obj_t cookie; 6940 6941 /* 6942 * Obtain <vp, off> pair for the page, then look it up. 6943 * 6944 * Note that this code is willing to consider regular 6945 * pages as well as anon pages. Is this appropriate here? 6946 */ 6947 ap = NULL; 6948 if (amp != NULL) { 6949 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6950 if (anon_array_try_enter(amp, anon_index + page, 6951 &cookie)) { 6952 ANON_LOCK_EXIT(&->a_rwlock); 6953 continue; 6954 } 6955 ap = anon_get_ptr(amp->ahp, anon_index + page); 6956 if (ap != NULL) { 6957 swap_xlate(ap, &vp, &off); 6958 } else { 6959 vp = svd->vp; 6960 off = svd->offset + ptob(page); 6961 } 6962 anon_array_exit(&cookie); 6963 ANON_LOCK_EXIT(&->a_rwlock); 6964 } else { 6965 vp = svd->vp; 6966 off = svd->offset + ptob(page); 6967 } 6968 if (vp == NULL) { /* untouched zfod page */ 6969 ASSERT(ap == NULL); 6970 continue; 6971 } 6972 6973 pp = page_lookup_nowait(vp, off, SE_SHARED); 6974 if (pp == NULL) 6975 continue; 6976 6977 6978 /* 6979 * Examine the page to see whether it can be tossed out, 6980 * keeping track of how many we've found. 6981 */ 6982 if (!page_tryupgrade(pp)) { 6983 /* 6984 * If the page has an i/o lock and no mappings, 6985 * it's very likely that the page is being 6986 * written out as a result of klustering. 6987 * Assume this is so and take credit for it here. 6988 */ 6989 if (!page_io_trylock(pp)) { 6990 if (!hat_page_is_mapped(pp)) 6991 pgcnt++; 6992 } else { 6993 page_io_unlock(pp); 6994 } 6995 page_unlock(pp); 6996 continue; 6997 } 6998 ASSERT(!page_iolock_assert(pp)); 6999 7000 7001 /* 7002 * Skip if page is locked or has mappings. 7003 * We don't need the page_struct_lock to look at lckcnt 7004 * and cowcnt because the page is exclusive locked. 7005 */ 7006 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7007 hat_page_is_mapped(pp)) { 7008 page_unlock(pp); 7009 continue; 7010 } 7011 7012 /* 7013 * dispose skips large pages so try to demote first. 
7014 */ 7015 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7016 page_unlock(pp); 7017 /* 7018 * XXX should skip the remaining page_t's of this 7019 * large page. 7020 */ 7021 continue; 7022 } 7023 7024 ASSERT(pp->p_szc == 0); 7025 7026 /* 7027 * No longer mapped -- we can toss it out. How 7028 * we do so depends on whether or not it's dirty. 7029 */ 7030 if (hat_ismod(pp) && pp->p_vnode) { 7031 /* 7032 * We must clean the page before it can be 7033 * freed. Setting B_FREE will cause pvn_done 7034 * to free the page when the i/o completes. 7035 * XXX: This also causes it to be accounted 7036 * as a pageout instead of a swap: need 7037 * B_SWAPOUT bit to use instead of B_FREE. 7038 * 7039 * Hold the vnode before releasing the page lock 7040 * to prevent it from being freed and re-used by 7041 * some other thread. 7042 */ 7043 VN_HOLD(vp); 7044 page_unlock(pp); 7045 7046 /* 7047 * Queue all i/o requests for the pageout thread 7048 * to avoid saturating the pageout devices. 7049 */ 7050 if (!queue_io_request(vp, off)) 7051 VN_RELE(vp); 7052 } else { 7053 /* 7054 * The page was clean, free it. 7055 * 7056 * XXX: Can we ever encounter modified pages 7057 * with no associated vnode here? 7058 */ 7059 ASSERT(pp->p_vnode != NULL); 7060 /*LINTED: constant in conditional context*/ 7061 VN_DISPOSE(pp, B_FREE, 0, kcred); 7062 } 7063 7064 /* 7065 * Credit now even if i/o is in progress. 7066 */ 7067 pgcnt++; 7068 } 7069 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7070 7071 /* 7072 * Wakeup pageout to initiate i/o on all queued requests. 7073 */ 7074 cv_signal_pageout(); 7075 return (ptob(pgcnt)); 7076 } 7077 7078 /* 7079 * Synchronize primary storage cache with real object in virtual memory. 7080 * 7081 * XXX - Anonymous pages should not be sync'ed out at all. 7082 */ 7083 static int 7084 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7085 { 7086 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7087 struct vpage *vpp; 7088 page_t *pp; 7089 u_offset_t offset; 7090 struct vnode *vp; 7091 u_offset_t off; 7092 caddr_t eaddr; 7093 int bflags; 7094 int err = 0; 7095 int segtype; 7096 int pageprot; 7097 int prot; 7098 ulong_t anon_index; 7099 struct anon_map *amp; 7100 struct anon *ap; 7101 anon_sync_obj_t cookie; 7102 7103 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7104 7105 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7106 7107 if (svd->softlockcnt > 0) { 7108 /* 7109 * flush all pages from seg cache 7110 * otherwise we may deadlock in swap_putpage 7111 * for B_INVAL page (4175402). 7112 * 7113 * Even if we grab segvn WRITER's lock or segp_slock 7114 * here, there might be another thread which could've 7115 * successfully performed lookup/insert just before 7116 * we acquired the lock here. So, grabbing either 7117 * lock here is of not much use. Until we devise 7118 * a strategy at upper layers to solve the 7119 * synchronization issues completely, we expect 7120 * applications to handle this appropriately. 7121 */ 7122 segvn_purge(seg); 7123 if (svd->softlockcnt > 0) { 7124 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7125 return (EAGAIN); 7126 } 7127 } 7128 7129 vpp = svd->vpage; 7130 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7131 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7132 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7133 7134 if (attr) { 7135 pageprot = attr & ~(SHARED|PRIVATE); 7136 segtype = (attr & SHARED) ? 
MAP_SHARED : MAP_PRIVATE; 7137 7138 /* 7139 * We are done if the segment types don't match 7140 * or if we have segment level protections and 7141 * they don't match. 7142 */ 7143 if (svd->type != segtype) { 7144 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7145 return (0); 7146 } 7147 if (vpp == NULL) { 7148 if (svd->prot != pageprot) { 7149 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7150 return (0); 7151 } 7152 prot = svd->prot; 7153 } else 7154 vpp = &svd->vpage[seg_page(seg, addr)]; 7155 7156 } else if (svd->vp && svd->amp == NULL && 7157 (flags & MS_INVALIDATE) == 0) { 7158 7159 /* 7160 * No attributes, no anonymous pages and MS_INVALIDATE flag 7161 * is not on, just use one big request. 7162 */ 7163 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7164 bflags, svd->cred); 7165 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7166 return (err); 7167 } 7168 7169 if ((amp = svd->amp) != NULL) 7170 anon_index = svd->anon_index + seg_page(seg, addr); 7171 7172 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7173 ap = NULL; 7174 if (amp != NULL) { 7175 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7176 anon_array_enter(amp, anon_index, &cookie); 7177 ap = anon_get_ptr(amp->ahp, anon_index++); 7178 if (ap != NULL) { 7179 swap_xlate(ap, &vp, &off); 7180 } else { 7181 vp = svd->vp; 7182 off = offset; 7183 } 7184 anon_array_exit(&cookie); 7185 ANON_LOCK_EXIT(&->a_rwlock); 7186 } else { 7187 vp = svd->vp; 7188 off = offset; 7189 } 7190 offset += PAGESIZE; 7191 7192 if (vp == NULL) /* untouched zfod page */ 7193 continue; 7194 7195 if (attr) { 7196 if (vpp) { 7197 prot = VPP_PROT(vpp); 7198 vpp++; 7199 } 7200 if (prot != pageprot) { 7201 continue; 7202 } 7203 } 7204 7205 /* 7206 * See if any of these pages are locked -- if so, then we 7207 * will have to truncate an invalidate request at the first 7208 * locked one. We don't need the page_struct_lock to test 7209 * as this is only advisory; even if we acquire it someone 7210 * might race in and lock the page after we unlock and before 7211 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7212 */ 7213 if (flags & MS_INVALIDATE) { 7214 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7215 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7216 page_unlock(pp); 7217 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7218 return (EBUSY); 7219 } 7220 if (ap != NULL && pp->p_szc != 0 && 7221 page_tryupgrade(pp)) { 7222 if (pp->p_lckcnt == 0 && 7223 pp->p_cowcnt == 0) { 7224 /* 7225 * swapfs VN_DISPOSE() won't 7226 * invalidate large pages. 7227 * Attempt to demote. 7228 * XXX can't help it if it 7229 * fails. But for swapfs 7230 * pages it is no big deal. 7231 */ 7232 (void) page_try_demote_pages( 7233 pp); 7234 } 7235 } 7236 page_unlock(pp); 7237 } 7238 } else if (svd->type == MAP_SHARED && amp != NULL) { 7239 /* 7240 * Avoid writting out to disk ISM's large pages 7241 * because segspt_free_pages() relies on NULL an_pvp 7242 * of anon slots of such pages. 7243 */ 7244 7245 ASSERT(svd->vp == NULL); 7246 /* 7247 * swapfs uses page_lookup_nowait if not freeing or 7248 * invalidating and skips a page if 7249 * page_lookup_nowait returns NULL. 7250 */ 7251 pp = page_lookup_nowait(vp, off, SE_SHARED); 7252 if (pp == NULL) { 7253 continue; 7254 } 7255 if (pp->p_szc != 0) { 7256 page_unlock(pp); 7257 continue; 7258 } 7259 7260 /* 7261 * Note ISM pages are created large so (vp, off)'s 7262 * page cannot suddenly become large after we unlock 7263 * pp. 
7264 */ 7265 page_unlock(pp); 7266 } 7267 /* 7268 * XXX - Should ultimately try to kluster 7269 * calls to VOP_PUTPAGE() for performance. 7270 */ 7271 VN_HOLD(vp); 7272 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7273 bflags, svd->cred); 7274 VN_RELE(vp); 7275 if (err) 7276 break; 7277 } 7278 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7279 return (err); 7280 } 7281 7282 /* 7283 * Determine if we have data corresponding to pages in the 7284 * primary storage virtual memory cache (i.e., "in core"). 7285 */ 7286 static size_t 7287 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7288 { 7289 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7290 struct vnode *vp, *avp; 7291 u_offset_t offset, aoffset; 7292 size_t p, ep; 7293 int ret; 7294 struct vpage *vpp; 7295 page_t *pp; 7296 uint_t start; 7297 struct anon_map *amp; /* XXX - for locknest */ 7298 struct anon *ap; 7299 uint_t attr; 7300 anon_sync_obj_t cookie; 7301 7302 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7303 7304 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7305 if (svd->amp == NULL && svd->vp == NULL) { 7306 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7307 bzero(vec, btopr(len)); 7308 return (len); /* no anonymous pages created yet */ 7309 } 7310 7311 p = seg_page(seg, addr); 7312 ep = seg_page(seg, addr + len); 7313 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7314 7315 amp = svd->amp; 7316 for (; p < ep; p++, addr += PAGESIZE) { 7317 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7318 ret = start; 7319 ap = NULL; 7320 avp = NULL; 7321 /* Grab the vnode/offset for the anon slot */ 7322 if (amp != NULL) { 7323 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7324 anon_array_enter(amp, svd->anon_index + p, &cookie); 7325 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7326 if (ap != NULL) { 7327 swap_xlate(ap, &avp, &aoffset); 7328 } 7329 anon_array_exit(&cookie); 7330 ANON_LOCK_EXIT(&->a_rwlock); 7331 } 7332 if ((avp != NULL) && page_exists(avp, aoffset)) { 7333 /* A page exists for the anon slot */ 7334 ret |= SEG_PAGE_INCORE; 7335 7336 /* 7337 * If page is mapped and writable 7338 */ 7339 attr = (uint_t)0; 7340 if ((hat_getattr(seg->s_as->a_hat, addr, 7341 &attr) != -1) && (attr & PROT_WRITE)) { 7342 ret |= SEG_PAGE_ANON; 7343 } 7344 /* 7345 * Don't get page_struct lock for lckcnt and cowcnt, 7346 * since this is purely advisory. 7347 */ 7348 if ((pp = page_lookup_nowait(avp, aoffset, 7349 SE_SHARED)) != NULL) { 7350 if (pp->p_lckcnt) 7351 ret |= SEG_PAGE_SOFTLOCK; 7352 if (pp->p_cowcnt) 7353 ret |= SEG_PAGE_HASCOW; 7354 page_unlock(pp); 7355 } 7356 } 7357 7358 /* Gather vnode statistics */ 7359 vp = svd->vp; 7360 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7361 7362 if (vp != NULL) { 7363 /* 7364 * Try to obtain a "shared" lock on the page 7365 * without blocking. If this fails, determine 7366 * if the page is in memory. 7367 */ 7368 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7369 if ((pp == NULL) && (page_exists(vp, offset))) { 7370 /* Page is incore, and is named */ 7371 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7372 } 7373 /* 7374 * Don't get page_struct lock for lckcnt and cowcnt, 7375 * since this is purely advisory. 
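 * A momentarily stale SEG_PAGE_SOFTLOCK or SEG_PAGE_HASCOW answer is
 * acceptable here; callers of this incore query only get a snapshot.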
7376 */ 7377 if (pp != NULL) { 7378 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7379 if (pp->p_lckcnt) 7380 ret |= SEG_PAGE_SOFTLOCK; 7381 if (pp->p_cowcnt) 7382 ret |= SEG_PAGE_HASCOW; 7383 page_unlock(pp); 7384 } 7385 } 7386 7387 /* Gather virtual page information */ 7388 if (vpp) { 7389 if (VPP_ISPPLOCK(vpp)) 7390 ret |= SEG_PAGE_LOCKED; 7391 vpp++; 7392 } 7393 7394 *vec++ = (char)ret; 7395 } 7396 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7397 return (len); 7398 } 7399 7400 /* 7401 * Statement for p_cowcnts/p_lckcnts. 7402 * 7403 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7404 * irrespective of the following factors or anything else: 7405 * 7406 * (1) anon slots are populated or not 7407 * (2) cow is broken or not 7408 * (3) refcnt on ap is 1 or greater than 1 7409 * 7410 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7411 * and munlock. 7412 * 7413 * 7414 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7415 * 7416 * if vpage has PROT_WRITE 7417 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7418 * else 7419 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7420 * 7421 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7422 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7423 * 7424 * We may also break COW if softlocking on read access in the physio case. 7425 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7426 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7427 * vpage doesn't have PROT_WRITE. 7428 * 7429 * 7430 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7431 * 7432 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7433 * increment p_lckcnt by calling page_subclaim() which takes care of 7434 * availrmem accounting and p_lckcnt overflow. 7435 * 7436 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7437 * increment p_cowcnt by calling page_addclaim() which takes care of 7438 * availrmem availability and p_cowcnt overflow. 7439 */ 7440 7441 /* 7442 * Lock down (or unlock) pages mapped by this segment. 7443 * 7444 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7445 * At fault time they will be relocated into larger pages. 
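 *
 * Example of the claim rules above: mlock(2) of a writable MAP_PRIVATE
 * page increments p_cowcnt (claim != 0 below); if the region is later
 * mprotect(2)ed to read-only, segvn_setprot() moves that claim over to
 * p_lckcnt via page_subclaim(), and the eventual munlock(2) then drops
 * p_lckcnt rather than p_cowcnt.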
7446 */ 7447 static int 7448 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7449 int attr, int op, ulong_t *lockmap, size_t pos) 7450 { 7451 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7452 struct vpage *vpp; 7453 struct vpage *evp; 7454 page_t *pp; 7455 u_offset_t offset; 7456 u_offset_t off; 7457 int segtype; 7458 int pageprot; 7459 int claim; 7460 struct vnode *vp; 7461 ulong_t anon_index; 7462 struct anon_map *amp; 7463 struct anon *ap; 7464 struct vattr va; 7465 anon_sync_obj_t cookie; 7466 struct kshmid *sp = NULL; 7467 struct proc *p = curproc; 7468 kproject_t *proj = NULL; 7469 int chargeproc = 1; 7470 size_t locked_bytes = 0; 7471 size_t unlocked_bytes = 0; 7472 int err = 0; 7473 7474 /* 7475 * Hold write lock on address space because may split or concatenate 7476 * segments 7477 */ 7478 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7479 7480 /* 7481 * If this is a shm, use shm's project and zone, else use 7482 * project and zone of calling process 7483 */ 7484 7485 /* Determine if this segment backs a sysV shm */ 7486 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7487 ASSERT(svd->type == MAP_SHARED); 7488 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7489 sp = svd->amp->a_sp; 7490 proj = sp->shm_perm.ipc_proj; 7491 chargeproc = 0; 7492 } 7493 7494 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7495 if (attr) { 7496 pageprot = attr & ~(SHARED|PRIVATE); 7497 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7498 7499 /* 7500 * We are done if the segment types don't match 7501 * or if we have segment level protections and 7502 * they don't match. 7503 */ 7504 if (svd->type != segtype) { 7505 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7506 return (0); 7507 } 7508 if (svd->pageprot == 0 && svd->prot != pageprot) { 7509 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7510 return (0); 7511 } 7512 } 7513 7514 if (op == MC_LOCK) { 7515 if (svd->tr_state == SEGVN_TR_INIT) { 7516 svd->tr_state = SEGVN_TR_OFF; 7517 } else if (svd->tr_state == SEGVN_TR_ON) { 7518 ASSERT(svd->amp != NULL); 7519 segvn_textunrepl(seg, 0); 7520 ASSERT(svd->amp == NULL && 7521 svd->tr_state == SEGVN_TR_OFF); 7522 } 7523 } 7524 7525 /* 7526 * If we're locking, then we must create a vpage structure if 7527 * none exists. If we're unlocking, then check to see if there 7528 * is a vpage -- if not, then we could not have locked anything. 7529 */ 7530 7531 if ((vpp = svd->vpage) == NULL) { 7532 if (op == MC_LOCK) 7533 segvn_vpage(seg); 7534 else { 7535 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7536 return (0); 7537 } 7538 } 7539 7540 /* 7541 * The anonymous data vector (i.e., previously 7542 * unreferenced mapping to swap space) can be allocated 7543 * by lazily testing for its existence. 
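 * That is, an anonymous (MAP_ANON) segment that has never been faulted
 * gets its anon_map created on MC_LOCK below, and missing anon slots are
 * zero-filled (anon_zero) before the pages are locked.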
7544 */ 7545 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7546 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7547 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7548 svd->amp->a_szc = seg->s_szc; 7549 } 7550 7551 if ((amp = svd->amp) != NULL) { 7552 anon_index = svd->anon_index + seg_page(seg, addr); 7553 } 7554 7555 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7556 evp = &svd->vpage[seg_page(seg, addr + len)]; 7557 7558 if (sp != NULL) 7559 mutex_enter(&sp->shm_mlock); 7560 7561 /* determine number of unlocked bytes in range for lock operation */ 7562 if (op == MC_LOCK) { 7563 7564 if (sp == NULL) { 7565 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7566 vpp++) { 7567 if (!VPP_ISPPLOCK(vpp)) 7568 unlocked_bytes += PAGESIZE; 7569 } 7570 } else { 7571 ulong_t i_idx, i_edx; 7572 anon_sync_obj_t i_cookie; 7573 struct anon *i_ap; 7574 struct vnode *i_vp; 7575 u_offset_t i_off; 7576 7577 /* Only count sysV pages once for locked memory */ 7578 i_edx = svd->anon_index + seg_page(seg, addr + len); 7579 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7580 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7581 anon_array_enter(amp, i_idx, &i_cookie); 7582 i_ap = anon_get_ptr(amp->ahp, i_idx); 7583 if (i_ap == NULL) { 7584 unlocked_bytes += PAGESIZE; 7585 anon_array_exit(&i_cookie); 7586 continue; 7587 } 7588 swap_xlate(i_ap, &i_vp, &i_off); 7589 anon_array_exit(&i_cookie); 7590 pp = page_lookup(i_vp, i_off, SE_SHARED); 7591 if (pp == NULL) { 7592 unlocked_bytes += PAGESIZE; 7593 continue; 7594 } else if (pp->p_lckcnt == 0) 7595 unlocked_bytes += PAGESIZE; 7596 page_unlock(pp); 7597 } 7598 ANON_LOCK_EXIT(&->a_rwlock); 7599 } 7600 7601 mutex_enter(&p->p_lock); 7602 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7603 chargeproc); 7604 mutex_exit(&p->p_lock); 7605 7606 if (err) { 7607 if (sp != NULL) 7608 mutex_exit(&sp->shm_mlock); 7609 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7610 return (err); 7611 } 7612 } 7613 /* 7614 * Loop over all pages in the range. Process if we're locking and 7615 * page has not already been locked in this mapping; or if we're 7616 * unlocking and the page has been locked. 7617 */ 7618 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7619 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7620 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7621 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7622 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7623 7624 if (amp != NULL) 7625 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7626 /* 7627 * If this isn't a MAP_NORESERVE segment and 7628 * we're locking, allocate anon slots if they 7629 * don't exist. The page is brought in later on. 7630 */ 7631 if (op == MC_LOCK && svd->vp == NULL && 7632 ((svd->flags & MAP_NORESERVE) == 0) && 7633 amp != NULL && 7634 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7635 == NULL)) { 7636 anon_array_enter(amp, anon_index, &cookie); 7637 7638 if ((ap = anon_get_ptr(amp->ahp, 7639 anon_index)) == NULL) { 7640 pp = anon_zero(seg, addr, &ap, 7641 svd->cred); 7642 if (pp == NULL) { 7643 anon_array_exit(&cookie); 7644 ANON_LOCK_EXIT(&->a_rwlock); 7645 err = ENOMEM; 7646 goto out; 7647 } 7648 ASSERT(anon_get_ptr(amp->ahp, 7649 anon_index) == NULL); 7650 (void) anon_set_ptr(amp->ahp, 7651 anon_index, ap, ANON_SLEEP); 7652 page_unlock(pp); 7653 } 7654 anon_array_exit(&cookie); 7655 } 7656 7657 /* 7658 * Get name for page, accounting for 7659 * existence of private copy. 
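 * i.e. prefer the page named by the anon slot (via swap_xlate()) when one
 * exists; otherwise fall back to the underlying vnode page at this
 * segment offset.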
7660 */ 7661 ap = NULL; 7662 if (amp != NULL) { 7663 anon_array_enter(amp, anon_index, &cookie); 7664 ap = anon_get_ptr(amp->ahp, anon_index); 7665 if (ap != NULL) { 7666 swap_xlate(ap, &vp, &off); 7667 } else { 7668 if (svd->vp == NULL && 7669 (svd->flags & MAP_NORESERVE)) { 7670 anon_array_exit(&cookie); 7671 ANON_LOCK_EXIT(&->a_rwlock); 7672 continue; 7673 } 7674 vp = svd->vp; 7675 off = offset; 7676 } 7677 anon_array_exit(&cookie); 7678 ANON_LOCK_EXIT(&->a_rwlock); 7679 } else { 7680 vp = svd->vp; 7681 off = offset; 7682 } 7683 7684 /* 7685 * Get page frame. It's ok if the page is 7686 * not available when we're unlocking, as this 7687 * may simply mean that a page we locked got 7688 * truncated out of existence after we locked it. 7689 * 7690 * Invoke VOP_GETPAGE() to obtain the page struct 7691 * since we may need to read it from disk if its 7692 * been paged out. 7693 */ 7694 if (op != MC_LOCK) 7695 pp = page_lookup(vp, off, SE_SHARED); 7696 else { 7697 page_t *pl[1 + 1]; 7698 int error; 7699 7700 ASSERT(vp != NULL); 7701 7702 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7703 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7704 S_OTHER, svd->cred); 7705 7706 /* 7707 * If the error is EDEADLK then we must bounce 7708 * up and drop all vm subsystem locks and then 7709 * retry the operation later 7710 * This behavior is a temporary measure because 7711 * ufs/sds logging is badly designed and will 7712 * deadlock if we don't allow this bounce to 7713 * happen. The real solution is to re-design 7714 * the logging code to work properly. See bug 7715 * 4125102 for details of the problem. 7716 */ 7717 if (error == EDEADLK) { 7718 err = error; 7719 goto out; 7720 } 7721 /* 7722 * Quit if we fail to fault in the page. Treat 7723 * the failure as an error, unless the addr 7724 * is mapped beyond the end of a file. 7725 */ 7726 if (error && svd->vp) { 7727 va.va_mask = AT_SIZE; 7728 if (VOP_GETATTR(svd->vp, &va, 0, 7729 svd->cred) != 0) { 7730 err = EIO; 7731 goto out; 7732 } 7733 if (btopr(va.va_size) >= 7734 btopr(off + 1)) { 7735 err = EIO; 7736 goto out; 7737 } 7738 goto out; 7739 7740 } else if (error) { 7741 err = EIO; 7742 goto out; 7743 } 7744 pp = pl[0]; 7745 ASSERT(pp != NULL); 7746 } 7747 7748 /* 7749 * See Statement at the beginning of this routine. 7750 * 7751 * claim is always set if MAP_PRIVATE and PROT_WRITE 7752 * irrespective of following factors: 7753 * 7754 * (1) anon slots are populated or not 7755 * (2) cow is broken or not 7756 * (3) refcnt on ap is 1 or greater than 1 7757 * 7758 * See 4140683 for details 7759 */ 7760 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7761 (svd->type == MAP_PRIVATE)); 7762 7763 /* 7764 * Perform page-level operation appropriate to 7765 * operation. If locking, undo the SOFTLOCK 7766 * performed to bring the page into memory 7767 * after setting the lock. If unlocking, 7768 * and no page was found, account for the claim 7769 * separately. 
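 * The locked_bytes/unlocked_bytes counts maintained here feed the
 * locked-memory resource control: bytes charged up front but not actually
 * locked are credited back at the "out:" label.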
7770 */ 7771 if (op == MC_LOCK) { 7772 int ret = 1; /* Assume success */ 7773 7774 ASSERT(!VPP_ISPPLOCK(vpp)); 7775 7776 ret = page_pp_lock(pp, claim, 0); 7777 if (ret == 0) { 7778 /* locking page failed */ 7779 page_unlock(pp); 7780 err = EAGAIN; 7781 goto out; 7782 } 7783 VPP_SETPPLOCK(vpp); 7784 if (sp != NULL) { 7785 if (pp->p_lckcnt == 1) 7786 locked_bytes += PAGESIZE; 7787 } else 7788 locked_bytes += PAGESIZE; 7789 7790 if (lockmap != (ulong_t *)NULL) 7791 BT_SET(lockmap, pos); 7792 7793 page_unlock(pp); 7794 } else { 7795 ASSERT(VPP_ISPPLOCK(vpp)); 7796 if (pp != NULL) { 7797 /* sysV pages should be locked */ 7798 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7799 page_pp_unlock(pp, claim, 0); 7800 if (sp != NULL) { 7801 if (pp->p_lckcnt == 0) 7802 unlocked_bytes 7803 += PAGESIZE; 7804 } else 7805 unlocked_bytes += PAGESIZE; 7806 page_unlock(pp); 7807 } else { 7808 ASSERT(sp == NULL); 7809 unlocked_bytes += PAGESIZE; 7810 } 7811 VPP_CLRPPLOCK(vpp); 7812 } 7813 } 7814 } 7815 out: 7816 if (op == MC_LOCK) { 7817 /* Credit back bytes that did not get locked */ 7818 if ((unlocked_bytes - locked_bytes) > 0) { 7819 if (proj == NULL) 7820 mutex_enter(&p->p_lock); 7821 rctl_decr_locked_mem(p, proj, 7822 (unlocked_bytes - locked_bytes), chargeproc); 7823 if (proj == NULL) 7824 mutex_exit(&p->p_lock); 7825 } 7826 7827 } else { 7828 /* Account bytes that were unlocked */ 7829 if (unlocked_bytes > 0) { 7830 if (proj == NULL) 7831 mutex_enter(&p->p_lock); 7832 rctl_decr_locked_mem(p, proj, unlocked_bytes, 7833 chargeproc); 7834 if (proj == NULL) 7835 mutex_exit(&p->p_lock); 7836 } 7837 } 7838 if (sp != NULL) 7839 mutex_exit(&sp->shm_mlock); 7840 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7841 7842 return (err); 7843 } 7844 7845 /* 7846 * Set advice from user for specified pages 7847 * There are 5 types of advice: 7848 * MADV_NORMAL - Normal (default) behavior (whatever that is) 7849 * MADV_RANDOM - Random page references 7850 * do not allow readahead or 'klustering' 7851 * MADV_SEQUENTIAL - Sequential page references 7852 * Pages previous to the one currently being 7853 * accessed (determined by fault) are 'not needed' 7854 * and are freed immediately 7855 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 7856 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 7857 * MADV_FREE - Contents can be discarded 7858 * MADV_ACCESS_DEFAULT- Default access 7859 * MADV_ACCESS_LWP - Next LWP will access heavily 7860 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 7861 */ 7862 static int 7863 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 7864 { 7865 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7866 size_t page; 7867 int err = 0; 7868 int already_set; 7869 struct anon_map *amp; 7870 ulong_t anon_index; 7871 struct seg *next; 7872 lgrp_mem_policy_t policy; 7873 struct seg *prev; 7874 struct vnode *vp; 7875 7876 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7877 7878 /* 7879 * In case of MADV_FREE, we won't be modifying any segment private 7880 * data structures; so, we only need to grab READER's lock 7881 */ 7882 if (behav != MADV_FREE) { 7883 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7884 if (svd->tr_state != SEGVN_TR_OFF) { 7885 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7886 return (0); 7887 } 7888 } else { 7889 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7890 } 7891 7892 /* 7893 * Large pages are assumed to be only turned on when accesses to the 7894 * segment's address range have spatial and temporal 
locality. That 7895 * justifies ignoring MADV_SEQUENTIAL for large page segments. 7896 * Also, ignore advice affecting lgroup memory allocation 7897 * if we don't need to do lgroup optimizations on this system 7898 */ 7899 7900 if ((behav == MADV_SEQUENTIAL && 7901 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 7902 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 7903 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 7904 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7905 return (0); 7906 } 7907 7908 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 7909 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 7910 /* 7911 * Since we are going to unload hat mappings 7912 * we first have to flush the cache. Otherwise 7913 * this might lead to system panic if another 7914 * thread is doing physio on the range whose 7915 * mappings are unloaded by madvise(3C). 7916 */ 7917 if (svd->softlockcnt > 0) { 7918 /* 7919 * Since we do have the segvn writers lock 7920 * nobody can fill the cache with entries 7921 * belonging to this seg during the purge. 7922 * The flush either succeeds or we still 7923 * have pending I/Os. In the latter case, 7924 * madvise(3C) fails. 7925 */ 7926 segvn_purge(seg); 7927 if (svd->softlockcnt > 0) { 7928 /* 7929 * Since madvise(3C) is advisory and 7930 * it's not part of UNIX98, madvise(3C) 7931 * failure here doesn't cause any hardship. 7932 * Note that we don't block in "as" layer. 7933 */ 7934 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7935 return (EAGAIN); 7936 } 7937 } 7938 } 7939 7940 amp = svd->amp; 7941 vp = svd->vp; 7942 if (behav == MADV_FREE) { 7943 /* 7944 * MADV_FREE is not supported for segments with 7945 * underlying object; if anonmap is NULL, anon slots 7946 * are not yet populated and there is nothing for 7947 * us to do. As MADV_FREE is advisory, we don't 7948 * return error in either case. 7949 */ 7950 if (vp != NULL || amp == NULL) { 7951 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7952 return (0); 7953 } 7954 7955 page = seg_page(seg, addr); 7956 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7957 anon_disclaim(amp, svd->anon_index + page, len, 0); 7958 ANON_LOCK_EXIT(&amp->a_rwlock); 7959 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7960 return (0); 7961 } 7962 7963 /* 7964 * If advice is to be applied to entire segment, 7965 * use advice field in segvn_data structure; 7966 * otherwise use the appropriate vpage entry. 7967 */ 7968 if ((addr == seg->s_base) && (len == seg->s_size)) { 7969 switch (behav) { 7970 case MADV_ACCESS_LWP: 7971 case MADV_ACCESS_MANY: 7972 case MADV_ACCESS_DEFAULT: 7973 /* 7974 * Set memory allocation policy for this segment 7975 */ 7976 policy = lgrp_madv_to_policy(behav, len, svd->type); 7977 if (svd->type == MAP_SHARED) 7978 already_set = lgrp_shm_policy_set(policy, amp, 7979 svd->anon_index, vp, svd->offset, len); 7980 else { 7981 /* 7982 * For private memory, need writers lock on 7983 * address space because the segment may be 7984 * split or concatenated when changing policy 7985 */ 7986 if (AS_READ_HELD(seg->s_as, 7987 &seg->s_as->a_lock)) { 7988 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7989 return (IE_RETRY); 7990 } 7991 7992 already_set = lgrp_privm_policy_set(policy, 7993 &svd->policy_info, len); 7994 } 7995 7996 /* 7997 * If policy set already and it shouldn't be reapplied, 7998 * don't do anything.
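 *
 * (Illustrative trigger for this whole-segment case, assuming the
 * usual madvise(3C) entry path: a call such as
 *	madvise(base, size, MADV_ACCESS_LWP);
 * that covers an entire mapping arrives here with addr == s_base
 * and len == s_size, so the chosen policy is recorded for the
 * segment as a whole rather than in per-page vpage entries.)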
7999 */ 8000 if (already_set && 8001 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8002 break; 8003 8004 /* 8005 * Mark any existing pages in given range for 8006 * migration 8007 */ 8008 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8009 vp, svd->offset, 1); 8010 8011 /* 8012 * If same policy set already or this is a shared 8013 * memory segment, don't need to try to concatenate 8014 * segment with adjacent ones. 8015 */ 8016 if (already_set || svd->type == MAP_SHARED) 8017 break; 8018 8019 /* 8020 * Try to concatenate this segment with previous 8021 * one and next one, since we changed policy for 8022 * this one and it may be compatible with adjacent 8023 * ones now. 8024 */ 8025 prev = AS_SEGPREV(seg->s_as, seg); 8026 next = AS_SEGNEXT(seg->s_as, seg); 8027 8028 if (next && next->s_ops == &segvn_ops && 8029 addr + len == next->s_base) 8030 (void) segvn_concat(seg, next, 1); 8031 8032 if (prev && prev->s_ops == &segvn_ops && 8033 addr == prev->s_base + prev->s_size) { 8034 /* 8035 * Drop lock for private data of current 8036 * segment before concatenating (deleting) it 8037 * and return IE_REATTACH to tell as_ctl() that 8038 * current segment has changed 8039 */ 8040 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8041 if (!segvn_concat(prev, seg, 1)) 8042 err = IE_REATTACH; 8043 8044 return (err); 8045 } 8046 break; 8047 8048 case MADV_SEQUENTIAL: 8049 /* 8050 * unloading mapping guarantees 8051 * detection in segvn_fault 8052 */ 8053 ASSERT(seg->s_szc == 0); 8054 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8055 hat_unload(seg->s_as->a_hat, addr, len, 8056 HAT_UNLOAD); 8057 /* FALLTHROUGH */ 8058 case MADV_NORMAL: 8059 case MADV_RANDOM: 8060 svd->advice = (uchar_t)behav; 8061 svd->pageadvice = 0; 8062 break; 8063 case MADV_WILLNEED: /* handled in memcntl */ 8064 case MADV_DONTNEED: /* handled in memcntl */ 8065 case MADV_FREE: /* handled above */ 8066 break; 8067 default: 8068 err = EINVAL; 8069 } 8070 } else { 8071 caddr_t eaddr; 8072 struct seg *new_seg; 8073 struct segvn_data *new_svd; 8074 u_offset_t off; 8075 caddr_t oldeaddr; 8076 8077 page = seg_page(seg, addr); 8078 8079 segvn_vpage(seg); 8080 8081 switch (behav) { 8082 struct vpage *bvpp, *evpp; 8083 8084 case MADV_ACCESS_LWP: 8085 case MADV_ACCESS_MANY: 8086 case MADV_ACCESS_DEFAULT: 8087 /* 8088 * Set memory allocation policy for portion of this 8089 * segment 8090 */ 8091 8092 /* 8093 * Align address and length of advice to page 8094 * boundaries for large pages 8095 */ 8096 if (seg->s_szc != 0) { 8097 size_t pgsz; 8098 8099 pgsz = page_get_pagesize(seg->s_szc); 8100 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8101 len = P2ROUNDUP(len, pgsz); 8102 } 8103 8104 /* 8105 * Check to see whether policy is set already 8106 */ 8107 policy = lgrp_madv_to_policy(behav, len, svd->type); 8108 8109 anon_index = svd->anon_index + page; 8110 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8111 8112 if (svd->type == MAP_SHARED) 8113 already_set = lgrp_shm_policy_set(policy, amp, 8114 anon_index, vp, off, len); 8115 else 8116 already_set = 8117 (policy == svd->policy_info.mem_policy); 8118 8119 /* 8120 * If policy set already and it shouldn't be reapplied, 8121 * don't do anything. 
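 *
 * (Worked example of the large-page alignment above, assuming a
 * 4M page size: addr 0x40a000 and len 0x3000 become
 * addr = P2ALIGN(0x40a000, 0x400000) = 0x400000 and
 * len = P2ROUNDUP(0x3000, 0x400000) = 0x400000, so the policy
 * below is applied to whole large pages only.)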
8122 */ 8123 if (already_set && 8124 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8125 break; 8126 8127 /* 8128 * For private memory, need writers lock on 8129 * address space because the segment may be 8130 * split or concatenated when changing policy 8131 */ 8132 if (svd->type == MAP_PRIVATE && 8133 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8134 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8135 return (IE_RETRY); 8136 } 8137 8138 /* 8139 * Mark any existing pages in given range for 8140 * migration 8141 */ 8142 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8143 vp, svd->offset, 1); 8144 8145 /* 8146 * Don't need to try to split or concatenate 8147 * segments, since policy is same or this is a shared 8148 * memory segment 8149 */ 8150 if (already_set || svd->type == MAP_SHARED) 8151 break; 8152 8153 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8154 ASSERT(svd->amp == NULL); 8155 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8156 ASSERT(svd->softlockcnt == 0); 8157 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8158 HAT_REGION_TEXT); 8159 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8160 } 8161 8162 /* 8163 * Split off new segment if advice only applies to a 8164 * portion of existing segment starting in middle 8165 */ 8166 new_seg = NULL; 8167 eaddr = addr + len; 8168 oldeaddr = seg->s_base + seg->s_size; 8169 if (addr > seg->s_base) { 8170 /* 8171 * Must flush I/O page cache 8172 * before splitting segment 8173 */ 8174 if (svd->softlockcnt > 0) 8175 segvn_purge(seg); 8176 8177 /* 8178 * Split segment and return IE_REATTACH to tell 8179 * as_ctl() that current segment changed 8180 */ 8181 new_seg = segvn_split_seg(seg, addr); 8182 new_svd = (struct segvn_data *)new_seg->s_data; 8183 err = IE_REATTACH; 8184 8185 /* 8186 * If new segment ends where old one 8187 * did, try to concatenate the new 8188 * segment with next one. 8189 */ 8190 if (eaddr == oldeaddr) { 8191 /* 8192 * Set policy for new segment 8193 */ 8194 (void) lgrp_privm_policy_set(policy, 8195 &new_svd->policy_info, 8196 new_seg->s_size); 8197 8198 next = AS_SEGNEXT(new_seg->s_as, 8199 new_seg); 8200 8201 if (next && 8202 next->s_ops == &segvn_ops && 8203 eaddr == next->s_base) 8204 (void) segvn_concat(new_seg, 8205 next, 1); 8206 } 8207 } 8208 8209 /* 8210 * Split off end of existing segment if advice only 8211 * applies to a portion of segment ending before 8212 * end of the existing segment 8213 */ 8214 if (eaddr < oldeaddr) { 8215 /* 8216 * Must flush I/O page cache 8217 * before splitting segment 8218 */ 8219 if (svd->softlockcnt > 0) 8220 segvn_purge(seg); 8221 8222 /* 8223 * If beginning of old segment was already 8224 * split off, use new segment to split end off 8225 * from. 8226 */ 8227 if (new_seg != NULL && new_seg != seg) { 8228 /* 8229 * Split segment 8230 */ 8231 (void) segvn_split_seg(new_seg, eaddr); 8232 8233 /* 8234 * Set policy for new segment 8235 */ 8236 (void) lgrp_privm_policy_set(policy, 8237 &new_svd->policy_info, 8238 new_seg->s_size); 8239 } else { 8240 /* 8241 * Split segment and return IE_REATTACH 8242 * to tell as_ctl() that current 8243 * segment changed 8244 */ 8245 (void) segvn_split_seg(seg, eaddr); 8246 err = IE_REATTACH; 8247 8248 (void) lgrp_privm_policy_set(policy, 8249 &svd->policy_info, seg->s_size); 8250 8251 /* 8252 * If new segment starts where old one 8253 * did, try to concatenate it with 8254 * previous segment. 
8255 */ 8256 if (addr == seg->s_base) { 8257 prev = AS_SEGPREV(seg->s_as, 8258 seg); 8259 8260 /* 8261 * Drop lock for private data 8262 * of current segment before 8263 * concatenating (deleting) it 8264 */ 8265 if (prev && 8266 prev->s_ops == 8267 &segvn_ops && 8268 addr == prev->s_base + 8269 prev->s_size) { 8270 SEGVN_LOCK_EXIT( 8271 seg->s_as, 8272 &svd->lock); 8273 (void) segvn_concat( 8274 prev, seg, 1); 8275 return (err); 8276 } 8277 } 8278 } 8279 } 8280 break; 8281 case MADV_SEQUENTIAL: 8282 ASSERT(seg->s_szc == 0); 8283 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8284 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8285 /* FALLTHROUGH */ 8286 case MADV_NORMAL: 8287 case MADV_RANDOM: 8288 bvpp = &svd->vpage[page]; 8289 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8290 for (; bvpp < evpp; bvpp++) 8291 VPP_SETADVICE(bvpp, behav); 8292 svd->advice = MADV_NORMAL; 8293 break; 8294 case MADV_WILLNEED: /* handled in memcntl */ 8295 case MADV_DONTNEED: /* handled in memcntl */ 8296 case MADV_FREE: /* handled above */ 8297 break; 8298 default: 8299 err = EINVAL; 8300 } 8301 } 8302 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8303 return (err); 8304 } 8305 8306 /* 8307 * Create a vpage structure for this seg. 8308 */ 8309 static void 8310 segvn_vpage(struct seg *seg) 8311 { 8312 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8313 struct vpage *vp, *evp; 8314 8315 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8316 8317 /* 8318 * If no vpage structure exists, allocate one. Copy the protections 8319 * and the advice from the segment itself to the individual pages. 8320 */ 8321 if (svd->vpage == NULL) { 8322 svd->pageadvice = 1; 8323 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage), 8324 KM_SLEEP); 8325 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8326 for (vp = svd->vpage; vp < evp; vp++) { 8327 VPP_SETPROT(vp, svd->prot); 8328 VPP_SETADVICE(vp, svd->advice); 8329 } 8330 } 8331 } 8332 8333 /* 8334 * Dump the pages belonging to this segvn segment. 8335 */ 8336 static void 8337 segvn_dump(struct seg *seg) 8338 { 8339 struct segvn_data *svd; 8340 page_t *pp; 8341 struct anon_map *amp; 8342 ulong_t anon_index; 8343 struct vnode *vp; 8344 u_offset_t off, offset; 8345 pfn_t pfn; 8346 pgcnt_t page, npages; 8347 caddr_t addr; 8348 8349 npages = seg_pages(seg); 8350 svd = (struct segvn_data *)seg->s_data; 8351 vp = svd->vp; 8352 off = offset = svd->offset; 8353 addr = seg->s_base; 8354 8355 if ((amp = svd->amp) != NULL) { 8356 anon_index = svd->anon_index; 8357 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8358 } 8359 8360 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8361 struct anon *ap; 8362 int we_own_it = 0; 8363 8364 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8365 swap_xlate_nopanic(ap, &vp, &off); 8366 } else { 8367 vp = svd->vp; 8368 off = offset; 8369 } 8370 8371 /* 8372 * If pp == NULL, the page either does not exist 8373 * or is exclusively locked. So determine if it 8374 * exists before searching for it. 8375 */ 8376 8377 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8378 we_own_it = 1; 8379 else 8380 pp = page_exists(vp, off); 8381 8382 if (pp) { 8383 pfn = page_pptonum(pp); 8384 dump_addpage(seg->s_as, addr, pfn); 8385 if (we_own_it) 8386 page_unlock(pp); 8387 } 8388 addr += PAGESIZE; 8389 dump_timeleft = dump_timeout; 8390 } 8391 8392 if (amp != NULL) 8393 ANON_LOCK_EXIT(&amp->a_rwlock); 8394 } 8395 8396 /* 8397 * lock/unlock anon pages over a given range.
Return shadow list 8398 */ 8399 static int 8400 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8401 enum lock_type type, enum seg_rw rw) 8402 { 8403 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8404 size_t np, adjustpages = 0, npages = (len >> PAGESHIFT); 8405 ulong_t anon_index; 8406 uint_t protchk; 8407 uint_t error; 8408 struct anon_map *amp; 8409 struct page **pplist, **pl, *pp; 8410 caddr_t a; 8411 size_t page; 8412 caddr_t lpgaddr, lpgeaddr; 8413 pgcnt_t szc0_npages = 0; 8414 8415 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8416 "segvn_pagelock: start seg %p addr %p", seg, addr); 8417 8418 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8419 if (seg->s_szc != 0 && (type == L_PAGELOCK || type == L_PAGEUNLOCK)) { 8420 /* 8421 * We are adjusting the pagelock region to the large page size 8422 * boundary because the unlocked part of a large page cannot 8423 * be freed anyway unless all constituent pages of a large 8424 * page are locked. Therefore this adjustment allows us to 8425 * decrement availrmem by the right value (note we don't want 8426 * to just decrement availrem by the large page size without 8427 * adjusting addr and len because then we may end up 8428 * decrementing availrmem by large page size for every 8429 * constituent page locked by a new as_pagelock call). 8430 * as_pageunlock caller must always match as_pagelock call's 8431 * addr and len. 8432 * 8433 * Note segment's page size cannot change while we are holding 8434 * as lock. And then it cannot change while softlockcnt is 8435 * not 0. This will allow us to correctly recalculate large 8436 * page size region for the matching pageunlock/reclaim call. 8437 * 8438 * for pageunlock *ppp points to the pointer of page_t that 8439 * corresponds to the real unadjusted start address. Similar 8440 * for pagelock *ppp must point to the pointer of page_t that 8441 * corresponds to the real unadjusted start address. 8442 */ 8443 size_t pgsz = page_get_pagesize(seg->s_szc); 8444 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8445 adjustpages = ((uintptr_t)(addr - lpgaddr)) >> PAGESHIFT; 8446 } 8447 8448 if (type == L_PAGEUNLOCK) { 8449 8450 /* 8451 * update hat ref bits for /proc. We need to make sure 8452 * that threads tracing the ref and mod bits of the 8453 * address space get the right data. 8454 * Note: page ref and mod bits are updated at reclaim time 8455 */ 8456 if (seg->s_as->a_vbits) { 8457 for (a = addr; a < addr + len; a += PAGESIZE) { 8458 if (rw == S_WRITE) { 8459 hat_setstat(seg->s_as, a, 8460 PAGESIZE, P_REF | P_MOD); 8461 } else { 8462 hat_setstat(seg->s_as, a, 8463 PAGESIZE, P_REF); 8464 } 8465 } 8466 } 8467 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8468 if (seg->s_szc != 0) { 8469 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8470 seg_pinactive(seg, lpgaddr, lpgeaddr - lpgaddr, 8471 *ppp - adjustpages, rw, segvn_reclaim); 8472 } else { 8473 seg_pinactive(seg, addr, len, *ppp, rw, segvn_reclaim); 8474 } 8475 8476 /* 8477 * If someone is blocked while unmapping, we purge 8478 * segment page cache and thus reclaim pplist synchronously 8479 * without waiting for seg_pasync_thread. This speeds up 8480 * unmapping in cases where munmap(2) is called, while 8481 * raw async i/o is still in progress or where a thread 8482 * exits on data fault in a multithreaded application. 
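 *
 * (Concrete scenario, offered as an illustration: a thread has
 * finished raw async I/O whose shadow lists still sit in seg_pcache
 * awaiting seg_pasync_thread, while another thread munmap(2)s the
 * range; the unmapper sets AS_UNMAPWAIT, and the purge below
 * reclaims those cached lists immediately so the unmap does not
 * stall.)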
8483 */ 8484 if (AS_ISUNMAPWAIT(seg->s_as) && (svd->softlockcnt > 0)) { 8485 /* 8486 * Even if we grab segvn WRITER's lock or segp_slock 8487 * here, there might be another thread which could've 8488 * successfully performed lookup/insert just before 8489 * we acquired the lock here. So, grabbing either 8490 * lock here is of not much use. Until we devise 8491 * a strategy at upper layers to solve the 8492 * synchronization issues completely, we expect 8493 * applications to handle this appropriately. 8494 */ 8495 segvn_purge(seg); 8496 } 8497 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8498 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8499 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8500 return (0); 8501 } else if (type == L_PAGERECLAIM) { 8502 VM_STAT_COND_ADD(seg->s_szc != 0, segvnvmstats.pagelock[1]); 8503 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8504 (void) segvn_reclaim(seg, addr, len, *ppp, rw); 8505 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8506 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8507 "segvn_pagelock: reclaim seg %p addr %p", seg, addr); 8508 return (0); 8509 } 8510 8511 if (seg->s_szc != 0) { 8512 VM_STAT_ADD(segvnvmstats.pagelock[2]); 8513 addr = lpgaddr; 8514 len = lpgeaddr - lpgaddr; 8515 npages = (len >> PAGESHIFT); 8516 } 8517 8518 /* 8519 * for now we only support pagelock to anon memory. We've to check 8520 * protections for vnode objects and call into the vnode driver. 8521 * That's too much for a fast path. Let the fault entry point handle it. 8522 */ 8523 if (svd->vp != NULL) { 8524 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8525 "segvn_pagelock: mapped vnode seg %p addr %p", seg, addr); 8526 *ppp = NULL; 8527 return (ENOTSUP); 8528 } 8529 8530 /* 8531 * if anonmap is not yet created, let the fault entry point populate it 8532 * with anon ptrs. 8533 */ 8534 if ((amp = svd->amp) == NULL) { 8535 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8536 "segvn_pagelock: anonmap null seg %p addr %p", seg, addr); 8537 *ppp = NULL; 8538 return (EFAULT); 8539 } 8540 8541 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8542 8543 /* 8544 * we acquire segp_slock to prevent duplicate entries 8545 * in seg_pcache 8546 */ 8547 mutex_enter(&svd->segp_slock); 8548 8549 /* 8550 * try to find pages in segment page cache 8551 */ 8552 pplist = seg_plookup(seg, addr, len, rw); 8553 if (pplist != NULL) { 8554 mutex_exit(&svd->segp_slock); 8555 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8556 *ppp = pplist + adjustpages; 8557 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8558 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8559 return (0); 8560 } 8561 8562 if (rw == S_READ) { 8563 protchk = PROT_READ; 8564 } else { 8565 protchk = PROT_WRITE; 8566 } 8567 8568 if (svd->pageprot == 0) { 8569 if ((svd->prot & protchk) == 0) { 8570 mutex_exit(&svd->segp_slock); 8571 error = EFAULT; 8572 goto out; 8573 } 8574 } else { 8575 /* 8576 * check page protections 8577 */ 8578 for (a = addr; a < addr + len; a += PAGESIZE) { 8579 struct vpage *vp; 8580 8581 vp = &svd->vpage[seg_page(seg, a)]; 8582 if ((VPP_PROT(vp) & protchk) == 0) { 8583 mutex_exit(&svd->segp_slock); 8584 error = EFAULT; 8585 goto out; 8586 } 8587 } 8588 } 8589 8590 /* 8591 * Avoid per page overhead of segvn_pp_lock_anonpages() for small 8592 * pages. For large pages segvn_pp_lock_anonpages() only does real 8593 * work once per large page. The tradeoff is that we may decrement 8594 * availrmem more than once for the same page but this is ok 8595 * for small pages. 
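 *
 * (For the small-page case this means availrmem is charged once, in
 * the block below, for the whole request: locking, say, a 128K
 * range of 8K base pages drops availrmem by 16 up front, and the
 * charge is given back if the lookup loop cannot lock every page.)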
8596 */ 8597 if (seg->s_szc == 0) { 8598 mutex_enter(&freemem_lock); 8599 if (availrmem < tune.t_minarmem + npages) { 8600 mutex_exit(&freemem_lock); 8601 mutex_exit(&svd->segp_slock); 8602 error = ENOMEM; 8603 goto out; 8604 } 8605 availrmem -= npages; 8606 mutex_exit(&freemem_lock); 8607 } 8608 8609 pplist = kmem_alloc(sizeof (page_t *) * npages, KM_SLEEP); 8610 pl = pplist; 8611 *ppp = pplist + adjustpages; 8612 8613 page = seg_page(seg, addr); 8614 anon_index = svd->anon_index + page; 8615 8616 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8617 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 8618 struct anon *ap; 8619 struct vnode *vp; 8620 u_offset_t off; 8621 anon_sync_obj_t cookie; 8622 8623 anon_array_enter(amp, anon_index, &cookie); 8624 ap = anon_get_ptr(amp->ahp, anon_index); 8625 if (ap == NULL) { 8626 anon_array_exit(&cookie); 8627 break; 8628 } else { 8629 /* 8630 * We must never use seg_pcache for COW pages 8631 * because we might end up with original page still 8632 * lying in seg_pcache even after private page is 8633 * created. This leads to data corruption as 8634 * aio_write refers to the page still in cache 8635 * while all other accesses refer to the private 8636 * page. 8637 */ 8638 if (ap->an_refcnt != 1) { 8639 anon_array_exit(&cookie); 8640 break; 8641 } 8642 } 8643 swap_xlate(ap, &vp, &off); 8644 anon_array_exit(&cookie); 8645 8646 pp = page_lookup_nowait(vp, off, SE_SHARED); 8647 if (pp == NULL) { 8648 break; 8649 } 8650 if (seg->s_szc != 0 || pp->p_szc != 0) { 8651 if (!segvn_pp_lock_anonpages(pp, a == addr)) { 8652 page_unlock(pp); 8653 break; 8654 } 8655 } else { 8656 szc0_npages++; 8657 } 8658 *pplist++ = pp; 8659 } 8660 ANON_LOCK_EXIT(&amp->a_rwlock); 8661 8662 ASSERT(npages >= szc0_npages); 8663 8664 if (a >= addr + len) { 8665 mutex_enter(&freemem_lock); 8666 if (seg->s_szc == 0 && npages != szc0_npages) { 8667 ASSERT(svd->type == MAP_SHARED && amp->a_szc > 0); 8668 availrmem += (npages - szc0_npages); 8669 } 8670 svd->softlockcnt += npages; 8671 segvn_pages_locked += npages; 8672 mutex_exit(&freemem_lock); 8673 (void) seg_pinsert(seg, addr, len, pl, rw, SEGP_ASYNC_FLUSH, 8674 segvn_reclaim); 8675 mutex_exit(&svd->segp_slock); 8676 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8677 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 8678 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 8679 return (0); 8680 } 8681 8682 mutex_exit(&svd->segp_slock); 8683 if (seg->s_szc == 0) { 8684 mutex_enter(&freemem_lock); 8685 availrmem += npages; 8686 mutex_exit(&freemem_lock); 8687 } 8688 error = EFAULT; 8689 pplist = pl; 8690 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 8691 while (np > (uint_t)0) { 8692 ASSERT(PAGE_LOCKED(*pplist)); 8693 if (seg->s_szc != 0 || (*pplist)->p_szc != 0) { 8694 segvn_pp_unlock_anonpages(*pplist, pplist == pl); 8695 } 8696 page_unlock(*pplist); 8697 np--; 8698 pplist++; 8699 } 8700 kmem_free(pl, sizeof (page_t *) * npages); 8701 out: 8702 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8703 *ppp = NULL; 8704 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 8705 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 8706 return (error); 8707 } 8708 8709 /* 8710 * purge any cached pages in the I/O page cache 8711 */ 8712 static void 8713 segvn_purge(struct seg *seg) 8714 { 8715 seg_ppurge(seg); 8716 } 8717 8718 static int 8719 segvn_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 8720 enum seg_rw rw) 8721 { 8722 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8723 pgcnt_t np, npages; 8724 struct page **pl;
8725 pgcnt_t szc0_npages = 0; 8726 8727 #ifdef lint 8728 addr = addr; 8729 #endif 8730 8731 npages = np = (len >> PAGESHIFT); 8732 ASSERT(npages); 8733 pl = pplist; 8734 if (seg->s_szc != 0) { 8735 size_t pgsz = page_get_pagesize(seg->s_szc); 8736 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 8737 panic("segvn_reclaim: unaligned addr or len"); 8738 /*NOTREACHED*/ 8739 } 8740 } 8741 8742 ASSERT(svd->vp == NULL && svd->amp != NULL); 8743 8744 while (np > (uint_t)0) { 8745 if (rw == S_WRITE) { 8746 hat_setrefmod(*pplist); 8747 } else { 8748 hat_setref(*pplist); 8749 } 8750 if (seg->s_szc != 0 || (*pplist)->p_szc != 0) { 8751 segvn_pp_unlock_anonpages(*pplist, pplist == pl); 8752 } else { 8753 szc0_npages++; 8754 } 8755 page_unlock(*pplist); 8756 np--; 8757 pplist++; 8758 } 8759 kmem_free(pl, sizeof (page_t *) * npages); 8760 8761 mutex_enter(&freemem_lock); 8762 segvn_pages_locked -= npages; 8763 svd->softlockcnt -= npages; 8764 if (szc0_npages != 0) { 8765 availrmem += szc0_npages; 8766 } 8767 mutex_exit(&freemem_lock); 8768 if (svd->softlockcnt <= 0) { 8769 if (AS_ISUNMAPWAIT(seg->s_as)) { 8770 mutex_enter(&seg->s_as->a_contents); 8771 if (AS_ISUNMAPWAIT(seg->s_as)) { 8772 AS_CLRUNMAPWAIT(seg->s_as); 8773 cv_broadcast(&seg->s_as->a_cv); 8774 } 8775 mutex_exit(&seg->s_as->a_contents); 8776 } 8777 } 8778 return (0); 8779 } 8780 /* 8781 * get a memory ID for an addr in a given segment 8782 * 8783 * XXX only creates PAGESIZE pages if anon slots are not initialized. 8784 * At fault time they will be relocated into larger pages. 8785 */ 8786 static int 8787 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 8788 { 8789 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8790 struct anon *ap = NULL; 8791 ulong_t anon_index; 8792 struct anon_map *amp; 8793 anon_sync_obj_t cookie; 8794 8795 if (svd->type == MAP_PRIVATE) { 8796 memidp->val[0] = (uintptr_t)seg->s_as; 8797 memidp->val[1] = (uintptr_t)addr; 8798 return (0); 8799 } 8800 8801 if (svd->type == MAP_SHARED) { 8802 if (svd->vp) { 8803 memidp->val[0] = (uintptr_t)svd->vp; 8804 memidp->val[1] = (u_longlong_t)svd->offset + 8805 (uintptr_t)(addr - seg->s_base); 8806 return (0); 8807 } else { 8808 8809 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8810 if ((amp = svd->amp) != NULL) { 8811 anon_index = svd->anon_index + 8812 seg_page(seg, addr); 8813 } 8814 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8815 8816 ASSERT(amp != NULL); 8817 8818 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8819 anon_array_enter(amp, anon_index, &cookie); 8820 ap = anon_get_ptr(amp->ahp, anon_index); 8821 if (ap == NULL) { 8822 page_t *pp; 8823 8824 pp = anon_zero(seg, addr, &ap, svd->cred); 8825 if (pp == NULL) { 8826 anon_array_exit(&cookie); 8827 ANON_LOCK_EXIT(&amp->a_rwlock); 8828 return (ENOMEM); 8829 } 8830 ASSERT(anon_get_ptr(amp->ahp, anon_index) 8831 == NULL); 8832 (void) anon_set_ptr(amp->ahp, anon_index, 8833 ap, ANON_SLEEP); 8834 page_unlock(pp); 8835 } 8836 8837 anon_array_exit(&cookie); 8838 ANON_LOCK_EXIT(&amp->a_rwlock); 8839 8840 memidp->val[0] = (uintptr_t)ap; 8841 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 8842 return (0); 8843 } 8844 } 8845 return (EINVAL); 8846 } 8847 8848 static int 8849 sameprot(struct seg *seg, caddr_t a, size_t len) 8850 { 8851 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8852 struct vpage *vpage; 8853 spgcnt_t pages = btop(len); 8854 uint_t prot; 8855 8856 if (svd->pageprot == 0) 8857 return (1); 8858 8859 ASSERT(svd->vpage != NULL); 8860 8861 vpage = &svd->vpage[seg_page(seg, a)];
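	/*
	 * Remember the protections of the first page in the range and
	 * fail as soon as any later vpage entry differs; with pageprot
	 * set, each page carries its own protections in its vpage entry.
	 */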
8862 prot = VPP_PROT(vpage); 8863 vpage++; 8864 pages--; 8865 while (pages-- > 0) { 8866 if (prot != VPP_PROT(vpage)) 8867 return (0); 8868 vpage++; 8869 } 8870 return (1); 8871 } 8872 8873 /* 8874 * Get memory allocation policy info for specified address in given segment 8875 */ 8876 static lgrp_mem_policy_info_t * 8877 segvn_getpolicy(struct seg *seg, caddr_t addr) 8878 { 8879 struct anon_map *amp; 8880 ulong_t anon_index; 8881 lgrp_mem_policy_info_t *policy_info; 8882 struct segvn_data *svn_data; 8883 u_offset_t vn_off; 8884 vnode_t *vp; 8885 8886 ASSERT(seg != NULL); 8887 8888 svn_data = (struct segvn_data *)seg->s_data; 8889 if (svn_data == NULL) 8890 return (NULL); 8891 8892 /* 8893 * Get policy info for private or shared memory 8894 */ 8895 if (svn_data->type != MAP_SHARED) { 8896 if (svn_data->tr_state != SEGVN_TR_ON) { 8897 policy_info = &svn_data->policy_info; 8898 } else { 8899 policy_info = &svn_data->tr_policy_info; 8900 ASSERT(policy_info->mem_policy == 8901 LGRP_MEM_POLICY_NEXT_SEG); 8902 } 8903 } else { 8904 amp = svn_data->amp; 8905 anon_index = svn_data->anon_index + seg_page(seg, addr); 8906 vp = svn_data->vp; 8907 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 8908 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 8909 } 8910 8911 return (policy_info); 8912 } 8913 8914 /*ARGSUSED*/ 8915 static int 8916 segvn_capable(struct seg *seg, segcapability_t capability) 8917 { 8918 return (0); 8919 } 8920 8921 /* 8922 * Bind text vnode segment to an amp. If we bind successfully mappings will be 8923 * established to per vnode mapping per lgroup amp pages instead of to vnode 8924 * pages. There's one amp per vnode text mapping per lgroup. Many processes 8925 * may share the same text replication amp. If a suitable amp doesn't already 8926 * exist in svntr hash table create a new one. We may fail to bind to amp if 8927 * segment is not eligible for text replication. Code below first checks for 8928 * these conditions. If binding is successful segment tr_state is set to on 8929 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 8930 * svd->amp remains as NULL. 8931 */ 8932 static void 8933 segvn_textrepl(struct seg *seg) 8934 { 8935 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8936 vnode_t *vp = svd->vp; 8937 u_offset_t off = svd->offset; 8938 size_t size = seg->s_size; 8939 u_offset_t eoff = off + size; 8940 uint_t szc = seg->s_szc; 8941 ulong_t hash = SVNTR_HASH_FUNC(vp); 8942 svntr_t *svntrp; 8943 struct vattr va; 8944 proc_t *p = seg->s_as->a_proc; 8945 lgrp_id_t lgrp_id; 8946 lgrp_id_t olid; 8947 int first; 8948 struct anon_map *amp; 8949 8950 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8951 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8952 ASSERT(p != NULL); 8953 ASSERT(svd->tr_state == SEGVN_TR_INIT); 8954 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 8955 ASSERT(svd->flags & MAP_TEXT); 8956 ASSERT(svd->type == MAP_PRIVATE); 8957 ASSERT(vp != NULL && svd->amp == NULL); 8958 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 8959 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 8960 ASSERT(seg->s_as != &kas); 8961 ASSERT(off < eoff); 8962 ASSERT(svntr_hashtab != NULL); 8963 8964 /* 8965 * If numa optimizations are no longer desired bail out. 8966 */ 8967 if (!lgrp_optimizations()) { 8968 svd->tr_state = SEGVN_TR_OFF; 8969 return; 8970 } 8971 8972 /* 8973 * Avoid creating anon maps with size bigger than the file size. 8974 * If VOP_GETATTR() call fails bail out. 
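 *
 * (The size check below compares page-rounded values, so a text
 * mapping that runs past EOF only within its last page is still
 * eligible; e.g. with 8K pages, btopr() of a 20K file size and of a
 * 24K mapping end offset are both 3, so the mapping is not counted
 * as an overmap.)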
8975 */ 8976 va.va_mask = AT_SIZE | AT_MTIME; 8977 if (VOP_GETATTR(vp, &va, 0, svd->cred) != 0) { 8978 svd->tr_state = SEGVN_TR_OFF; 8979 SEGVN_TR_ADDSTAT(gaerr); 8980 return; 8981 } 8982 if (btopr(va.va_size) < btopr(eoff)) { 8983 svd->tr_state = SEGVN_TR_OFF; 8984 SEGVN_TR_ADDSTAT(overmap); 8985 return; 8986 } 8987 8988 /* 8989 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 8990 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 8991 * mapping that checks if trcache for this vnode needs to be 8992 * invalidated can't miss us. 8993 */ 8994 if (!(vp->v_flag & VVMEXEC)) { 8995 mutex_enter(&vp->v_lock); 8996 vp->v_flag |= VVMEXEC; 8997 mutex_exit(&vp->v_lock); 8998 } 8999 mutex_enter(&svntr_hashtab[hash].tr_lock); 9000 /* 9001 * Bail out if potentially MAP_SHARED writable mappings exist to this 9002 * vnode. We don't want to use old file contents from existing 9003 * replicas if this mapping was established after the original file 9004 * was changed. 9005 */ 9006 if (vn_is_mapped(vp, V_WRITE)) { 9007 mutex_exit(&svntr_hashtab[hash].tr_lock); 9008 svd->tr_state = SEGVN_TR_OFF; 9009 SEGVN_TR_ADDSTAT(wrcnt); 9010 return; 9011 } 9012 svntrp = svntr_hashtab[hash].tr_head; 9013 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9014 ASSERT(svntrp->tr_refcnt != 0); 9015 if (svntrp->tr_vp != vp) { 9016 continue; 9017 } 9018 /* 9019 * Bail out if file was changed after this replication entry 9020 * was created since we need to use the latest file contents. 9021 */ 9022 if (!svntrp->tr_valid || 9023 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9024 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec) { 9025 mutex_exit(&svntr_hashtab[hash].tr_lock); 9026 svd->tr_state = SEGVN_TR_OFF; 9027 SEGVN_TR_ADDSTAT(stale); 9028 return; 9029 } 9030 /* 9031 * if off, eoff and szc match current segment we found the 9032 * existing entry we can use. 9033 */ 9034 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9035 svntrp->tr_szc == szc) { 9036 break; 9037 } 9038 /* 9039 * Don't create different but overlapping in file offsets 9040 * entries to avoid replication of the same file pages more 9041 * than once per lgroup. 9042 */ 9043 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9044 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9045 mutex_exit(&svntr_hashtab[hash].tr_lock); 9046 svd->tr_state = SEGVN_TR_OFF; 9047 SEGVN_TR_ADDSTAT(overlap); 9048 return; 9049 } 9050 } 9051 /* 9052 * If we didn't find existing entry create a new one. 9053 */ 9054 if (svntrp == NULL) { 9055 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9056 if (svntrp == NULL) { 9057 mutex_exit(&svntr_hashtab[hash].tr_lock); 9058 svd->tr_state = SEGVN_TR_OFF; 9059 SEGVN_TR_ADDSTAT(nokmem); 9060 return; 9061 } 9062 #ifdef DEBUG 9063 { 9064 lgrp_id_t i; 9065 for (i = 0; i < NLGRPS_MAX; i++) { 9066 ASSERT(svntrp->tr_amp[i] == NULL); 9067 } 9068 } 9069 #endif /* DEBUG */ 9070 svntrp->tr_vp = vp; 9071 svntrp->tr_off = off; 9072 svntrp->tr_eoff = eoff; 9073 svntrp->tr_szc = szc; 9074 svntrp->tr_valid = 1; 9075 svntrp->tr_mtime = va.va_mtime; 9076 svntrp->tr_refcnt = 0; 9077 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9078 svntr_hashtab[hash].tr_head = svntrp; 9079 } 9080 first = 1; 9081 again: 9082 /* 9083 * We want to pick a replica with pages on main thread's (t_tid = 1, 9084 * aka T1) lgrp. Currently text replication is only optimized for 9085 * workloads that either have all threads of a process on the same 9086 * lgrp or execute their large text primarily on main thread. 
9087 */ 9088 lgrp_id = p->p_t1_lgrpid; 9089 if (lgrp_id == LGRP_NONE) { 9090 /* 9091 * In case exec() prefaults text on a non-main thread use the 9092 * current thread's lgrpid. It will become the main thread 9093 * soon anyway. 9094 */ 9095 lgrp_id = lgrp_home_id(curthread); 9096 } 9097 /* 9098 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise 9099 * just set it to NLGRPS_MAX if it's different from current process T1 9100 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9101 * replication and T1 new home is different from lgrp used for text 9102 * replication. When this happens the asynchronous segvn thread 9103 * rechecks if segments should change lgrps used for text replication. 9104 * If we fail to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX 9105 * without cas if it's not already NLGRPS_MAX and not equal to the 9106 * lgrp_id we want to use. We don't need to use cas in this case 9107 * because another thread that races in between our non-atomic check 9108 * and set may only change p_tr_lgrpid to NLGRPS_MAX at this point. 9109 */ 9110 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9111 olid = p->p_tr_lgrpid; 9112 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9113 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9114 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) { 9115 olid = p->p_tr_lgrpid; 9116 ASSERT(olid != LGRP_NONE); 9117 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9118 p->p_tr_lgrpid = NLGRPS_MAX; 9119 } 9120 } 9121 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9122 membar_producer(); 9123 /* 9124 * lgrp_move_thread() won't schedule async recheck after 9125 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9126 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9127 * is not LGRP_NONE. 9128 */ 9129 if (first && p->p_t1_lgrpid != LGRP_NONE && 9130 p->p_t1_lgrpid != lgrp_id) { 9131 first = 0; 9132 goto again; 9133 } 9134 } 9135 /* 9136 * If no amp was created yet for lgrp_id create a new one as long as 9137 * we have enough memory to afford it.
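 *
 * ("Afford" covers two separate limits enforced below: the global
 * segvn_textrepl_bytes count must stay within
 * segvn_textrepl_max_bytes, and a swap reservation of the segment
 * size must succeed via anon_try_resv_zone(); failing either one
 * backs out the byte count and leaves the segment unreplicated.)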
9138 */ 9139 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9140 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9141 if (trmem > segvn_textrepl_max_bytes) { 9142 SEGVN_TR_ADDSTAT(normem); 9143 goto fail; 9144 } 9145 if (anon_try_resv_zone(size, NULL) == 0) { 9146 SEGVN_TR_ADDSTAT(noanon); 9147 goto fail; 9148 } 9149 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9150 if (amp == NULL) { 9151 anon_unresv_zone(size, NULL); 9152 SEGVN_TR_ADDSTAT(nokmem); 9153 goto fail; 9154 } 9155 ASSERT(amp->refcnt == 1); 9156 amp->a_szc = szc; 9157 svntrp->tr_amp[lgrp_id] = amp; 9158 SEGVN_TR_ADDSTAT(newamp); 9159 } 9160 svntrp->tr_refcnt++; 9161 ASSERT(svd->svn_trnext == NULL); 9162 ASSERT(svd->svn_trprev == NULL); 9163 svd->svn_trnext = svntrp->tr_svnhead; 9164 svd->svn_trprev = NULL; 9165 if (svntrp->tr_svnhead != NULL) { 9166 svntrp->tr_svnhead->svn_trprev = svd; 9167 } 9168 svntrp->tr_svnhead = svd; 9169 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9170 ASSERT(amp->refcnt >= 1); 9171 svd->amp = amp; 9172 svd->anon_index = 0; 9173 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9174 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9175 svd->tr_state = SEGVN_TR_ON; 9176 mutex_exit(&svntr_hashtab[hash].tr_lock); 9177 SEGVN_TR_ADDSTAT(repl); 9178 return; 9179 fail: 9180 ASSERT(segvn_textrepl_bytes >= size); 9181 atomic_add_long(&segvn_textrepl_bytes, -size); 9182 ASSERT(svntrp != NULL); 9183 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9184 if (svntrp->tr_refcnt == 0) { 9185 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9186 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9187 mutex_exit(&svntr_hashtab[hash].tr_lock); 9188 kmem_cache_free(svntr_cache, svntrp); 9189 } else { 9190 mutex_exit(&svntr_hashtab[hash].tr_lock); 9191 } 9192 svd->tr_state = SEGVN_TR_OFF; 9193 } 9194 9195 /* 9196 * Convert seg back to regular vnode mapping seg by unbinding it from its text 9197 * replication amp. This routine is most typically called when segment is 9198 * unmapped but can also be called when segment no longer qualifies for text 9199 * replication (e.g. due to protection changes). If unload_unmap is set use 9200 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of 9201 * svntr free all its anon maps and remove it from the hash table. 
9202 */ 9203 static void 9204 segvn_textunrepl(struct seg *seg, int unload_unmap) 9205 { 9206 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9207 vnode_t *vp = svd->vp; 9208 u_offset_t off = svd->offset; 9209 size_t size = seg->s_size; 9210 u_offset_t eoff = off + size; 9211 uint_t szc = seg->s_szc; 9212 ulong_t hash = SVNTR_HASH_FUNC(vp); 9213 svntr_t *svntrp; 9214 svntr_t **prv_svntrp; 9215 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid; 9216 lgrp_id_t i; 9217 9218 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9219 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 9220 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9221 ASSERT(svd->tr_state == SEGVN_TR_ON); 9222 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9223 ASSERT(svd->amp != NULL); 9224 ASSERT(svd->amp->refcnt >= 1); 9225 ASSERT(svd->anon_index == 0); 9226 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9227 ASSERT(svntr_hashtab != NULL); 9228 9229 mutex_enter(&svntr_hashtab[hash].tr_lock); 9230 prv_svntrp = &svntr_hashtab[hash].tr_head; 9231 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) { 9232 ASSERT(svntrp->tr_refcnt != 0); 9233 if (svntrp->tr_vp == vp && svntrp->tr_off == off && 9234 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) { 9235 break; 9236 } 9237 } 9238 if (svntrp == NULL) { 9239 panic("segvn_textunrepl: svntr record not found"); 9240 } 9241 if (svntrp->tr_amp[lgrp_id] != svd->amp) { 9242 panic("segvn_textunrepl: amp mismatch"); 9243 } 9244 svd->tr_state = SEGVN_TR_OFF; 9245 svd->amp = NULL; 9246 if (svd->svn_trprev == NULL) { 9247 ASSERT(svntrp->tr_svnhead == svd); 9248 svntrp->tr_svnhead = svd->svn_trnext; 9249 if (svntrp->tr_svnhead != NULL) { 9250 svntrp->tr_svnhead->svn_trprev = NULL; 9251 } 9252 svd->svn_trnext = NULL; 9253 } else { 9254 svd->svn_trprev->svn_trnext = svd->svn_trnext; 9255 if (svd->svn_trnext != NULL) { 9256 svd->svn_trnext->svn_trprev = svd->svn_trprev; 9257 svd->svn_trnext = NULL; 9258 } 9259 svd->svn_trprev = NULL; 9260 } 9261 if (--svntrp->tr_refcnt) { 9262 mutex_exit(&svntr_hashtab[hash].tr_lock); 9263 goto done; 9264 } 9265 *prv_svntrp = svntrp->tr_next; 9266 mutex_exit(&svntr_hashtab[hash].tr_lock); 9267 for (i = 0; i < NLGRPS_MAX; i++) { 9268 struct anon_map *amp = svntrp->tr_amp[i]; 9269 if (amp == NULL) { 9270 continue; 9271 } 9272 ASSERT(amp->refcnt == 1); 9273 ASSERT(amp->swresv == size); 9274 ASSERT(amp->size == size); 9275 ASSERT(amp->a_szc == szc); 9276 if (amp->a_szc != 0) { 9277 anon_free_pages(amp->ahp, 0, size, szc); 9278 } else { 9279 anon_free(amp->ahp, 0, size); 9280 } 9281 svntrp->tr_amp[i] = NULL; 9282 ASSERT(segvn_textrepl_bytes >= size); 9283 atomic_add_long(&segvn_textrepl_bytes, -size); 9284 anon_unresv_zone(amp->swresv, NULL); 9285 amp->refcnt = 0; 9286 anonmap_free(amp); 9287 } 9288 kmem_cache_free(svntr_cache, svntrp); 9289 done: 9290 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size, 9291 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL); 9292 } 9293 9294 /* 9295 * This is called when a MAP_SHARED writabble mapping is created to a vnode 9296 * that is currently used for execution (VVMEXEC flag is set). In this case we 9297 * need to prevent further use of existing replicas. 
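 *
 * (Invalidation here is lazy: tr_valid is simply cleared, existing
 * mappings keep whatever replica amp they are already using until
 * they are unmapped, and later segvn_textrepl() callers will see
 * tr_valid == 0 and refuse to reuse the stale entry.)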
9298 */ 9299 static void 9300 segvn_inval_trcache(vnode_t *vp) 9301 { 9302 ulong_t hash = SVNTR_HASH_FUNC(vp); 9303 svntr_t *svntrp; 9304 9305 ASSERT(vp->v_flag & VVMEXEC); 9306 9307 if (svntr_hashtab == NULL) { 9308 return; 9309 } 9310 9311 mutex_enter(&svntr_hashtab[hash].tr_lock); 9312 svntrp = svntr_hashtab[hash].tr_head; 9313 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9314 ASSERT(svntrp->tr_refcnt != 0); 9315 if (svntrp->tr_vp == vp && svntrp->tr_valid) { 9316 svntrp->tr_valid = 0; 9317 } 9318 } 9319 mutex_exit(&svntr_hashtab[hash].tr_lock); 9320 } 9321 9322 static void 9323 segvn_trasync_thread(void) 9324 { 9325 callb_cpr_t cpr_info; 9326 kmutex_t cpr_lock; /* just for CPR stuff */ 9327 9328 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL); 9329 9330 CALLB_CPR_INIT(&cpr_info, &cpr_lock, 9331 callb_generic_cpr, "segvn_async"); 9332 9333 if (segvn_update_textrepl_interval == 0) { 9334 segvn_update_textrepl_interval = segvn_update_tr_time * hz; 9335 } else { 9336 segvn_update_textrepl_interval *= hz; 9337 } 9338 (void) timeout(segvn_trupdate_wakeup, NULL, 9339 segvn_update_textrepl_interval); 9340 9341 for (;;) { 9342 mutex_enter(&cpr_lock); 9343 CALLB_CPR_SAFE_BEGIN(&cpr_info); 9344 mutex_exit(&cpr_lock); 9345 sema_p(&segvn_trasync_sem); 9346 mutex_enter(&cpr_lock); 9347 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 9348 mutex_exit(&cpr_lock); 9349 segvn_trupdate(); 9350 } 9351 } 9352 9353 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0; 9354 9355 static void 9356 segvn_trupdate_wakeup(void *dummy) 9357 { 9358 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations(); 9359 9360 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) { 9361 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs; 9362 sema_v(&segvn_trasync_sem); 9363 } 9364 9365 if (!segvn_disable_textrepl_update && 9366 segvn_update_textrepl_interval != 0) { 9367 (void) timeout(segvn_trupdate_wakeup, dummy, 9368 segvn_update_textrepl_interval); 9369 } 9370 } 9371 9372 static void 9373 segvn_trupdate(void) 9374 { 9375 ulong_t hash; 9376 svntr_t *svntrp; 9377 segvn_data_t *svd; 9378 9379 ASSERT(svntr_hashtab != NULL); 9380 9381 for (hash = 0; hash < svntr_hashtab_sz; hash++) { 9382 mutex_enter(&svntr_hashtab[hash].tr_lock); 9383 svntrp = svntr_hashtab[hash].tr_head; 9384 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9385 ASSERT(svntrp->tr_refcnt != 0); 9386 svd = svntrp->tr_svnhead; 9387 for (; svd != NULL; svd = svd->svn_trnext) { 9388 segvn_trupdate_seg(svd->seg, svd, svntrp, 9389 hash); 9390 } 9391 } 9392 mutex_exit(&svntr_hashtab[hash].tr_lock); 9393 } 9394 } 9395 9396 static void 9397 segvn_trupdate_seg(struct seg *seg, 9398 segvn_data_t *svd, 9399 svntr_t *svntrp, 9400 ulong_t hash) 9401 { 9402 proc_t *p; 9403 lgrp_id_t lgrp_id; 9404 struct as *as; 9405 size_t size; 9406 struct anon_map *amp; 9407 9408 ASSERT(svd->vp != NULL); 9409 ASSERT(svd->vp == svntrp->tr_vp); 9410 ASSERT(svd->offset == svntrp->tr_off); 9411 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff); 9412 ASSERT(seg != NULL); 9413 ASSERT(svd->seg == seg); 9414 ASSERT(seg->s_data == (void *)svd); 9415 ASSERT(seg->s_szc == svntrp->tr_szc); 9416 ASSERT(svd->tr_state == SEGVN_TR_ON); 9417 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9418 ASSERT(svd->amp != NULL); 9419 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 9420 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE); 9421 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX); 9422 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp); 9423 
ASSERT(svntrp->tr_refcnt != 0); 9424 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock)); 9425 9426 as = seg->s_as; 9427 ASSERT(as != NULL && as != &kas); 9428 p = as->a_proc; 9429 ASSERT(p != NULL); 9430 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9431 lgrp_id = p->p_t1_lgrpid; 9432 if (lgrp_id == LGRP_NONE) { 9433 return; 9434 } 9435 ASSERT(lgrp_id < NLGRPS_MAX); 9436 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) { 9437 return; 9438 } 9439 9440 /* 9441 * Use tryenter locking since we are locking as/seg and svntr hash 9442 * lock in reverse from syncrounous thread order. 9443 */ 9444 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) { 9445 SEGVN_TR_ADDSTAT(nolock); 9446 if (segvn_lgrp_trthr_migrs_snpsht) { 9447 segvn_lgrp_trthr_migrs_snpsht = 0; 9448 } 9449 return; 9450 } 9451 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) { 9452 AS_LOCK_EXIT(as, &as->a_lock); 9453 SEGVN_TR_ADDSTAT(nolock); 9454 if (segvn_lgrp_trthr_migrs_snpsht) { 9455 segvn_lgrp_trthr_migrs_snpsht = 0; 9456 } 9457 return; 9458 } 9459 size = seg->s_size; 9460 if (svntrp->tr_amp[lgrp_id] == NULL) { 9461 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9462 if (trmem > segvn_textrepl_max_bytes) { 9463 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9464 AS_LOCK_EXIT(as, &as->a_lock); 9465 atomic_add_long(&segvn_textrepl_bytes, -size); 9466 SEGVN_TR_ADDSTAT(normem); 9467 return; 9468 } 9469 if (anon_try_resv_zone(size, NULL) == 0) { 9470 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9471 AS_LOCK_EXIT(as, &as->a_lock); 9472 atomic_add_long(&segvn_textrepl_bytes, -size); 9473 SEGVN_TR_ADDSTAT(noanon); 9474 return; 9475 } 9476 amp = anonmap_alloc(size, size, KM_NOSLEEP); 9477 if (amp == NULL) { 9478 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9479 AS_LOCK_EXIT(as, &as->a_lock); 9480 atomic_add_long(&segvn_textrepl_bytes, -size); 9481 anon_unresv_zone(size, NULL); 9482 SEGVN_TR_ADDSTAT(nokmem); 9483 return; 9484 } 9485 ASSERT(amp->refcnt == 1); 9486 amp->a_szc = seg->s_szc; 9487 svntrp->tr_amp[lgrp_id] = amp; 9488 } 9489 /* 9490 * We don't need to drop the bucket lock but here we give other 9491 * threads a chance. svntr and svd can't be unlinked as long as 9492 * segment lock is held as a writer and AS held as well. After we 9493 * retake bucket lock we'll continue from where we left. We'll be able 9494 * to reach the end of either list since new entries are always added 9495 * to the beginning of the lists. 9496 */ 9497 mutex_exit(&svntr_hashtab[hash].tr_lock); 9498 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL); 9499 mutex_enter(&svntr_hashtab[hash].tr_lock); 9500 9501 ASSERT(svd->tr_state == SEGVN_TR_ON); 9502 ASSERT(svd->amp != NULL); 9503 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG); 9504 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id); 9505 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]); 9506 9507 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9508 svd->amp = svntrp->tr_amp[lgrp_id]; 9509 p->p_tr_lgrpid = NLGRPS_MAX; 9510 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9511 AS_LOCK_EXIT(as, &as->a_lock); 9512 9513 ASSERT(svntrp->tr_refcnt != 0); 9514 ASSERT(svd->vp == svntrp->tr_vp); 9515 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id); 9516 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]); 9517 ASSERT(svd->seg == seg); 9518 ASSERT(svd->tr_state == SEGVN_TR_ON); 9519 9520 SEGVN_TR_ADDSTAT(asyncrepl); 9521 } 9522
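/*
 * Illustrative user-level sketch of how the entry points above are
 * typically reached (the user-level calls are assumptions offered
 * for illustration): mmap(2), madvise(3C) and mlock(3C)/memcntl(2)
 * drive segvn_advise(), segvn_lockop() and friends through the as
 * layer, roughly as follows:
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *	(void) madvise(p, len, MADV_ACCESS_LWP);	(segvn_advise)
 *	(void) mlock(p, len);				(segvn_lockop, MC_LOCK)
 *	(void) munlock(p, len);				(segvn_lockop, MC_UNLOCK)
 *	(void) munmap(p, len);
 */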