1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2018 Joyent, Inc. 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 25 */ 26 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28 /* All Rights Reserved */ 29 30 /* 31 * University Copyright- Copyright (c) 1982, 1986, 1988 32 * The Regents of the University of California 33 * All Rights Reserved 34 * 35 * University Acknowledgment- Portions of this document are derived from 36 * software developed by the University of California, Berkeley, and its 37 * contributors. 38 */ 39 40 /* 41 * VM - shared or copy-on-write from a vnode/anonymous memory. 42 */ 43 44 #include <sys/types.h> 45 #include <sys/param.h> 46 #include <sys/t_lock.h> 47 #include <sys/errno.h> 48 #include <sys/systm.h> 49 #include <sys/mman.h> 50 #include <sys/debug.h> 51 #include <sys/cred.h> 52 #include <sys/vmsystm.h> 53 #include <sys/tuneable.h> 54 #include <sys/bitmap.h> 55 #include <sys/swap.h> 56 #include <sys/kmem.h> 57 #include <sys/sysmacros.h> 58 #include <sys/vtrace.h> 59 #include <sys/cmn_err.h> 60 #include <sys/callb.h> 61 #include <sys/vm.h> 62 #include <sys/dumphdr.h> 63 #include <sys/lgrp.h> 64 65 #include <vm/hat.h> 66 #include <vm/as.h> 67 #include <vm/seg.h> 68 #include <vm/seg_vn.h> 69 #include <vm/pvn.h> 70 #include <vm/anon.h> 71 #include <vm/page.h> 72 #include <vm/vpage.h> 73 #include <sys/proc.h> 74 #include <sys/task.h> 75 #include <sys/project.h> 76 #include <sys/zone.h> 77 #include <sys/shm_impl.h> 78 79 /* 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all 81 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if 82 * it can. In the rare case when this page list is not large enough, it 83 * goes and gets a large enough array from kmem. 84 * 85 * This small page list array covers either 8 pages or 64kB worth of pages - 86 * whichever is smaller. 87 */ 88 #define PVN_MAX_GETPAGE_SZ 0x10000 89 #define PVN_MAX_GETPAGE_NUM 0x8 90 91 #if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE 92 #define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM) 93 #define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM 94 #else 95 #define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ 96 #define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ) 97 #endif 98 99 /* 100 * Private seg op routines. 
101 */ 102 static int segvn_dup(struct seg *seg, struct seg *newseg); 103 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 104 static void segvn_free(struct seg *seg); 105 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 106 caddr_t addr, size_t len, enum fault_type type, 107 enum seg_rw rw); 108 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 109 static int segvn_setprot(struct seg *seg, caddr_t addr, 110 size_t len, uint_t prot); 111 static int segvn_checkprot(struct seg *seg, caddr_t addr, 112 size_t len, uint_t prot); 113 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 114 static size_t segvn_swapout(struct seg *seg); 115 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 116 int attr, uint_t flags); 117 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 118 char *vec); 119 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 120 int attr, int op, ulong_t *lockmap, size_t pos); 121 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 122 uint_t *protv); 123 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 124 static int segvn_gettype(struct seg *seg, caddr_t addr); 125 static int segvn_getvp(struct seg *seg, caddr_t addr, 126 struct vnode **vpp); 127 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 128 uint_t behav); 129 static void segvn_dump(struct seg *seg); 130 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 131 struct page ***ppp, enum lock_type type, enum seg_rw rw); 132 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 133 uint_t szc); 134 static int segvn_getmemid(struct seg *seg, caddr_t addr, 135 memid_t *memidp); 136 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 137 static int segvn_capable(struct seg *seg, segcapability_t capable); 138 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t); 139 140 struct seg_ops segvn_ops = { 141 segvn_dup, 142 segvn_unmap, 143 segvn_free, 144 segvn_fault, 145 segvn_faulta, 146 segvn_setprot, 147 segvn_checkprot, 148 segvn_kluster, 149 segvn_swapout, 150 segvn_sync, 151 segvn_incore, 152 segvn_lockop, 153 segvn_getprot, 154 segvn_getoffset, 155 segvn_gettype, 156 segvn_getvp, 157 segvn_advise, 158 segvn_dump, 159 segvn_pagelock, 160 segvn_setpagesize, 161 segvn_getmemid, 162 segvn_getpolicy, 163 segvn_capable, 164 segvn_inherit 165 }; 166 167 /* 168 * Common zfod structures, provided as a shorthand for others to use. 
169 */ 170 static segvn_crargs_t zfod_segvn_crargs = 171 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 172 static segvn_crargs_t kzfod_segvn_crargs = 173 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 174 PROT_ALL & ~PROT_USER); 175 static segvn_crargs_t stack_noexec_crargs = 176 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 177 178 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 179 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 180 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 181 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 182 183 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 184 185 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 186 187 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */ 188 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */ 189 uint_t segvn_pglock_comb_bshift; 190 size_t segvn_pglock_comb_palign; 191 192 static int segvn_concat(struct seg *, struct seg *, int); 193 static int segvn_extend_prev(struct seg *, struct seg *, 194 struct segvn_crargs *, size_t); 195 static int segvn_extend_next(struct seg *, struct seg *, 196 struct segvn_crargs *, size_t); 197 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 198 static void segvn_pagelist_rele(page_t **); 199 static void segvn_setvnode_mpss(vnode_t *); 200 static void segvn_relocate_pages(page_t **, page_t *); 201 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 202 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 203 uint_t, page_t **, page_t **, uint_t *, int *); 204 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 205 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 206 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 207 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 208 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 209 u_offset_t, struct vpage *, page_t **, uint_t, 210 enum fault_type, enum seg_rw, int); 211 static void segvn_vpage(struct seg *); 212 static size_t segvn_count_swap_by_vpages(struct seg *); 213 214 static void segvn_purge(struct seg *seg); 215 static int segvn_reclaim(void *, caddr_t, size_t, struct page **, 216 enum seg_rw, int); 217 static int shamp_reclaim(void *, caddr_t, size_t, struct page **, 218 enum seg_rw, int); 219 220 static int sameprot(struct seg *, caddr_t, size_t); 221 222 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 223 static int segvn_clrszc(struct seg *); 224 static struct seg *segvn_split_seg(struct seg *, caddr_t); 225 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 226 ulong_t, uint_t); 227 228 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 229 size_t, void *, u_offset_t); 230 231 static struct kmem_cache *segvn_cache; 232 static struct kmem_cache **segvn_szc_cache; 233 234 #ifdef VM_STATS 235 static struct segvnvmstats_str { 236 ulong_t fill_vp_pages[31]; 237 ulong_t fltvnpages[49]; 238 ulong_t fullszcpages[10]; 239 ulong_t relocatepages[3]; 240 ulong_t fltanpages[17]; 241 ulong_t pagelock[2]; 242 ulong_t demoterange[3]; 243 } segvnvmstats; 244 #endif /* VM_STATS */ 245 246 #define SDR_RANGE 1 /* demote entire range */ 247 #define SDR_END 2 /* demote non aligned ends only */ 248 249 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 250 if 
((len) != 0) { \ 251 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 252 ASSERT(lpgaddr >= (seg)->s_base); \ 253 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 254 (len)), pgsz); \ 255 ASSERT(lpgeaddr > lpgaddr); \ 256 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 257 } else { \ 258 lpgeaddr = lpgaddr = (addr); \ 259 } \ 260 } 261 262 /*ARGSUSED*/ 263 static int 264 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 265 { 266 struct segvn_data *svd = buf; 267 268 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 269 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 270 svd->svn_trnext = svd->svn_trprev = NULL; 271 return (0); 272 } 273 274 /*ARGSUSED1*/ 275 static void 276 segvn_cache_destructor(void *buf, void *cdrarg) 277 { 278 struct segvn_data *svd = buf; 279 280 rw_destroy(&svd->lock); 281 mutex_destroy(&svd->segfree_syncmtx); 282 } 283 284 /*ARGSUSED*/ 285 static int 286 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 287 { 288 bzero(buf, sizeof (svntr_t)); 289 return (0); 290 } 291 292 /* 293 * Patching this variable to non-zero allows the system to run with 294 * stacks marked as "not executable". It's a bit of a kludge, but is 295 * provided as a tweakable for platforms that export those ABIs 296 * (e.g. sparc V8) that have executable stacks enabled by default. 297 * There are also some restrictions for platforms that don't actually 298 * implement 'noexec' protections. 299 * 300 * Once enabled, the system is (therefore) unable to provide a fully 301 * ABI-compliant execution environment, though practically speaking, 302 * most everything works. The exceptions are generally some interpreters 303 * and debuggers that create executable code on the stack and jump 304 * into it (without explicitly mprotecting the address range to include 305 * PROT_EXEC). 306 * 307 * One important class of applications that are disabled are those 308 * that have been transformed into malicious agents using one of the 309 * numerous "buffer overflow" attacks. See 4007890. 310 */ 311 int noexec_user_stack = 0; 312 int noexec_user_stack_log = 1; 313 314 int segvn_lpg_disable = 0; 315 uint_t segvn_maxpgszc = 0; 316 317 ulong_t segvn_vmpss_clrszc_cnt; 318 ulong_t segvn_vmpss_clrszc_err; 319 ulong_t segvn_fltvnpages_clrszc_cnt; 320 ulong_t segvn_fltvnpages_clrszc_err; 321 ulong_t segvn_setpgsz_align_err; 322 ulong_t segvn_setpgsz_anon_align_err; 323 ulong_t segvn_setpgsz_getattr_err; 324 ulong_t segvn_setpgsz_eof_err; 325 ulong_t segvn_faultvnmpss_align_err1; 326 ulong_t segvn_faultvnmpss_align_err2; 327 ulong_t segvn_faultvnmpss_align_err3; 328 ulong_t segvn_faultvnmpss_align_err4; 329 ulong_t segvn_faultvnmpss_align_err5; 330 ulong_t segvn_vmpss_pageio_deadlk_err; 331 332 int segvn_use_regions = 1; 333 334 /* 335 * Segvn supports text replication optimization for NUMA platforms. Text 336 * replica's are represented by anon maps (amp). There's one amp per text file 337 * region per lgroup. A process chooses the amp for each of its text mappings 338 * based on the lgroup assignment of its main thread (t_tid = 1). All 339 * processes that want a replica on a particular lgroup for the same text file 340 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 341 * with vp,off,size,szc used as a key. Text replication segments are read only 342 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 343 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 344 * pages. 
Replication amp is assigned to a segment when it gets its first 345 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread 346 * rechecks periodically if the process still maps an amp local to the main 347 * thread. If not async thread forces process to remap to an amp in the new 348 * home lgroup of the main thread. Current text replication implementation 349 * only provides the benefit to workloads that do most of their work in the 350 * main thread of a process or all the threads of a process run in the same 351 * lgroup. To extend text replication benefit to different types of 352 * multithreaded workloads further work would be needed in the hat layer to 353 * allow the same virtual address in the same hat to simultaneously map 354 * different physical addresses (i.e. page table replication would be needed 355 * for x86). 356 * 357 * amp pages are used instead of vnode pages as long as segment has a very 358 * simple life cycle. It's created via segvn_create(), handles S_EXEC 359 * (S_READ) pagefaults and is fully unmapped. If anything more complicated 360 * happens such as protection is changed, real COW fault happens, pagesize is 361 * changed, MC_LOCK is requested or segment is partially unmapped we turn off 362 * text replication by converting the segment back to vnode only segment 363 * (unmap segment's address range and set svd->amp to NULL). 364 * 365 * The original file can be changed after amp is inserted into 366 * svntr_hashtab. Processes that are launched after the file is already 367 * changed can't use the replica's created prior to the file change. To 368 * implement this functionality hash entries are timestamped. Replica's can 369 * only be used if current file modification time is the same as the timestamp 370 * saved when hash entry was created. However just timestamps alone are not 371 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We 372 * deal with file changes via MAP_SHARED mappings differently. When writable 373 * MAP_SHARED mappings are created to vnodes marked as executable we mark all 374 * existing replica's for this vnode as not usable for future text 375 * mappings. And we don't create new replica's for files that currently have 376 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is 377 * true). 
378 */ 379 380 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 381 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 382 383 static ulong_t svntr_hashtab_sz = 512; 384 static svntr_bucket_t *svntr_hashtab = NULL; 385 static struct kmem_cache *svntr_cache; 386 static svntr_stats_t *segvn_textrepl_stats; 387 static ksema_t segvn_trasync_sem; 388 389 int segvn_disable_textrepl = 1; 390 size_t textrepl_size_thresh = (size_t)-1; 391 size_t segvn_textrepl_bytes = 0; 392 size_t segvn_textrepl_max_bytes = 0; 393 clock_t segvn_update_textrepl_interval = 0; 394 int segvn_update_tr_time = 10; 395 int segvn_disable_textrepl_update = 0; 396 397 static void segvn_textrepl(struct seg *); 398 static void segvn_textunrepl(struct seg *, int); 399 static void segvn_inval_trcache(vnode_t *); 400 static void segvn_trasync_thread(void); 401 static void segvn_trupdate_wakeup(void *); 402 static void segvn_trupdate(void); 403 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 404 ulong_t); 405 406 /* 407 * Initialize segvn data structures 408 */ 409 void 410 segvn_init(void) 411 { 412 uint_t maxszc; 413 uint_t szc; 414 size_t pgsz; 415 416 segvn_cache = kmem_cache_create("segvn_cache", 417 sizeof (struct segvn_data), 0, 418 segvn_cache_constructor, segvn_cache_destructor, NULL, 419 NULL, NULL, 0); 420 421 if (segvn_lpg_disable == 0) { 422 szc = maxszc = page_num_pagesizes() - 1; 423 if (szc == 0) { 424 segvn_lpg_disable = 1; 425 } 426 if (page_get_pagesize(0) != PAGESIZE) { 427 panic("segvn_init: bad szc 0"); 428 /*NOTREACHED*/ 429 } 430 while (szc != 0) { 431 pgsz = page_get_pagesize(szc); 432 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 433 panic("segvn_init: bad szc %d", szc); 434 /*NOTREACHED*/ 435 } 436 szc--; 437 } 438 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 439 segvn_maxpgszc = maxszc; 440 } 441 442 if (segvn_maxpgszc) { 443 segvn_szc_cache = (struct kmem_cache **)kmem_alloc( 444 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *), 445 KM_SLEEP); 446 } 447 448 for (szc = 1; szc <= segvn_maxpgszc; szc++) { 449 char str[32]; 450 451 (void) sprintf(str, "segvn_szc_cache%d", szc); 452 segvn_szc_cache[szc] = kmem_cache_create(str, 453 page_get_pagecnt(szc) * sizeof (page_t *), 0, 454 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 455 } 456 457 458 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 459 segvn_use_regions = 0; 460 461 /* 462 * For now shared regions and text replication segvn support 463 * are mutually exclusive. This is acceptable because 464 * currently significant benefit from text replication was 465 * only observed on AMD64 NUMA platforms (due to relatively 466 * small L2$ size) and currently we don't support shared 467 * regions on x86. 
468 */ 469 if (segvn_use_regions && !segvn_disable_textrepl) { 470 segvn_disable_textrepl = 1; 471 } 472 473 #if defined(_LP64) 474 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 475 !segvn_disable_textrepl) { 476 ulong_t i; 477 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 478 479 svntr_cache = kmem_cache_create("svntr_cache", 480 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 481 NULL, NULL, NULL, 0); 482 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 483 for (i = 0; i < svntr_hashtab_sz; i++) { 484 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 485 MUTEX_DEFAULT, NULL); 486 } 487 segvn_textrepl_max_bytes = ptob(physmem) / 488 segvn_textrepl_max_bytes_factor; 489 segvn_textrepl_stats = kmem_zalloc(NCPU * 490 sizeof (svntr_stats_t), KM_SLEEP); 491 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 492 (void) thread_create(NULL, 0, segvn_trasync_thread, 493 NULL, 0, &p0, TS_RUN, minclsyspri); 494 } 495 #endif 496 497 if (!ISP2(segvn_pglock_comb_balign) || 498 segvn_pglock_comb_balign < PAGESIZE) { 499 segvn_pglock_comb_balign = 1UL << 16; /* 64K */ 500 } 501 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1; 502 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign); 503 } 504 505 #define SEGVN_PAGEIO ((void *)0x1) 506 #define SEGVN_NOPAGEIO ((void *)0x2) 507 508 static void 509 segvn_setvnode_mpss(vnode_t *vp) 510 { 511 int err; 512 513 ASSERT(vp->v_mpssdata == NULL || 514 vp->v_mpssdata == SEGVN_PAGEIO || 515 vp->v_mpssdata == SEGVN_NOPAGEIO); 516 517 if (vp->v_mpssdata == NULL) { 518 if (vn_vmpss_usepageio(vp)) { 519 err = VOP_PAGEIO(vp, (page_t *)NULL, 520 (u_offset_t)0, 0, 0, CRED(), NULL); 521 } else { 522 err = ENOSYS; 523 } 524 /* 525 * set v_mpssdata just once per vnode life 526 * so that it never changes. 527 */ 528 mutex_enter(&vp->v_lock); 529 if (vp->v_mpssdata == NULL) { 530 if (err == EINVAL) { 531 vp->v_mpssdata = SEGVN_PAGEIO; 532 } else { 533 vp->v_mpssdata = SEGVN_NOPAGEIO; 534 } 535 } 536 mutex_exit(&vp->v_lock); 537 } 538 } 539 540 int 541 segvn_create(struct seg **segpp, void *argsp) 542 { 543 struct seg *seg = *segpp; 544 extern lgrp_mem_policy_t lgrp_mem_default_policy; 545 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 546 struct segvn_data *svd; 547 size_t swresv = 0; 548 struct cred *cred; 549 struct anon_map *amp; 550 int error = 0; 551 size_t pgsz; 552 lgrp_mem_policy_t mpolicy = lgrp_mem_default_policy; 553 int use_rgn = 0; 554 int trok = 0; 555 556 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); 557 558 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 559 panic("segvn_create type"); 560 /*NOTREACHED*/ 561 } 562 563 /* 564 * Check arguments. If a shared anon structure is given then 565 * it is illegal to also specify a vp. 566 */ 567 if (a->amp != NULL && a->vp != NULL) { 568 panic("segvn_create anon_map"); 569 /*NOTREACHED*/ 570 } 571 572 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 573 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 574 segvn_use_regions) { 575 use_rgn = 1; 576 } 577 578 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. 
*/ 579 if (a->type == MAP_SHARED) 580 a->flags &= ~MAP_NORESERVE; 581 582 if (a->szc != 0) { 583 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 584 (a->amp != NULL && a->type == MAP_PRIVATE) || 585 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 586 a->szc = 0; 587 } else { 588 if (a->szc > segvn_maxpgszc) 589 a->szc = segvn_maxpgszc; 590 pgsz = page_get_pagesize(a->szc); 591 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 592 !IS_P2ALIGNED(seg->s_size, pgsz)) { 593 a->szc = 0; 594 } else if (a->vp != NULL) { 595 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 596 /* 597 * paranoid check. 598 * hat_page_demote() is not supported 599 * on swapfs pages. 600 */ 601 a->szc = 0; 602 } else if (map_addr_vacalign_check(seg->s_base, 603 a->offset & PAGEMASK)) { 604 a->szc = 0; 605 } 606 } else if (a->amp != NULL) { 607 pgcnt_t anum = btopr(a->offset); 608 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 609 if (!IS_P2ALIGNED(anum, pgcnt)) { 610 a->szc = 0; 611 } 612 } 613 } 614 } 615 616 /* 617 * If segment may need private pages, reserve them now. 618 */ 619 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 620 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 621 if (anon_resv_zone(seg->s_size, 622 seg->s_as->a_proc->p_zone) == 0) 623 return (EAGAIN); 624 swresv = seg->s_size; 625 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 626 seg, swresv, 1); 627 } 628 629 /* 630 * Reserve any mapping structures that may be required. 631 * 632 * Don't do it for segments that may use regions. It's currently a 633 * noop in the hat implementations anyway. 634 */ 635 if (!use_rgn) { 636 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 637 } 638 639 if (a->cred) { 640 cred = a->cred; 641 crhold(cred); 642 } else { 643 crhold(cred = CRED()); 644 } 645 646 /* Inform the vnode of the new mapping */ 647 if (a->vp != NULL) { 648 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 649 seg->s_as, seg->s_base, seg->s_size, a->prot, 650 a->maxprot, a->type, cred, NULL); 651 if (error) { 652 if (swresv != 0) { 653 anon_unresv_zone(swresv, 654 seg->s_as->a_proc->p_zone); 655 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 656 "anon proc:%p %lu %u", seg, swresv, 0); 657 } 658 crfree(cred); 659 if (!use_rgn) { 660 hat_unload(seg->s_as->a_hat, seg->s_base, 661 seg->s_size, HAT_UNLOAD_UNMAP); 662 } 663 return (error); 664 } 665 /* 666 * svntr_hashtab will be NULL if we support shared regions. 667 */ 668 trok = ((a->flags & MAP_TEXT) && 669 (seg->s_size > textrepl_size_thresh || 670 (a->flags & _MAP_TEXTREPL)) && 671 lgrp_optimizations() && svntr_hashtab != NULL && 672 a->type == MAP_PRIVATE && swresv == 0 && 673 !(a->flags & MAP_NORESERVE) && 674 seg->s_as != &kas && a->vp->v_type == VREG); 675 676 ASSERT(!trok || !use_rgn); 677 } 678 679 /* 680 * MAP_NORESERVE mappings don't count towards the VSZ of a process 681 * until we fault the pages in. 682 */ 683 if ((a->vp == NULL || a->vp->v_type != VREG) && 684 a->flags & MAP_NORESERVE) { 685 seg->s_as->a_resvsize -= seg->s_size; 686 } 687 688 /* 689 * If more than one segment in the address space, and they're adjacent 690 * virtually, try to concatenate them. Don't concatenate if an 691 * explicit anon_map structure was supplied (e.g., SystemV shared 692 * memory) or if we'll use text replication for this segment. 
693 */ 694 if (a->amp == NULL && !use_rgn && !trok) { 695 struct seg *pseg, *nseg; 696 struct segvn_data *psvd, *nsvd; 697 lgrp_mem_policy_t ppolicy, npolicy; 698 uint_t lgrp_mem_policy_flags = 0; 699 700 /* 701 * Memory policy flags (lgrp_mem_policy_flags) is valid when 702 * extending stack/heap segments. 703 */ 704 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 705 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 706 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 707 } else { 708 /* 709 * Get policy when not extending it from another segment 710 */ 711 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 712 } 713 714 /* 715 * First, try to concatenate the previous and new segments 716 */ 717 pseg = AS_SEGPREV(seg->s_as, seg); 718 if (pseg != NULL && 719 pseg->s_base + pseg->s_size == seg->s_base && 720 pseg->s_ops == &segvn_ops) { 721 /* 722 * Get memory allocation policy from previous segment. 723 * When extension is specified (e.g. for heap) apply 724 * this policy to the new segment regardless of the 725 * outcome of segment concatenation. Extension occurs 726 * for non-default policy otherwise default policy is 727 * used and is based on extended segment size. 728 */ 729 psvd = (struct segvn_data *)pseg->s_data; 730 ppolicy = psvd->policy_info.mem_policy; 731 if (lgrp_mem_policy_flags == 732 LGRP_MP_FLAG_EXTEND_UP) { 733 if (ppolicy != lgrp_mem_default_policy) { 734 mpolicy = ppolicy; 735 } else { 736 mpolicy = lgrp_mem_policy_default( 737 pseg->s_size + seg->s_size, 738 a->type); 739 } 740 } 741 742 if (mpolicy == ppolicy && 743 (pseg->s_size + seg->s_size <= 744 segvn_comb_thrshld || psvd->amp == NULL) && 745 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 746 /* 747 * success! now try to concatenate 748 * with following seg 749 */ 750 crfree(cred); 751 nseg = AS_SEGNEXT(pseg->s_as, pseg); 752 if (nseg != NULL && 753 nseg != pseg && 754 nseg->s_ops == &segvn_ops && 755 pseg->s_base + pseg->s_size == 756 nseg->s_base) 757 (void) segvn_concat(pseg, nseg, 0); 758 ASSERT(pseg->s_szc == 0 || 759 (a->szc == pseg->s_szc && 760 IS_P2ALIGNED(pseg->s_base, pgsz) && 761 IS_P2ALIGNED(pseg->s_size, pgsz))); 762 /* 763 * Communicate out the newly concatenated 764 * segment as part of the result. 765 */ 766 *segpp = pseg; 767 return (0); 768 } 769 } 770 771 /* 772 * Failed, so try to concatenate with following seg 773 */ 774 nseg = AS_SEGNEXT(seg->s_as, seg); 775 if (nseg != NULL && 776 seg->s_base + seg->s_size == nseg->s_base && 777 nseg->s_ops == &segvn_ops) { 778 /* 779 * Get memory allocation policy from next segment. 780 * When extension is specified (e.g. for stack) apply 781 * this policy to the new segment regardless of the 782 * outcome of segment concatenation. Extension occurs 783 * for non-default policy otherwise default policy is 784 * used and is based on extended segment size. 785 */ 786 nsvd = (struct segvn_data *)nseg->s_data; 787 npolicy = nsvd->policy_info.mem_policy; 788 if (lgrp_mem_policy_flags == 789 LGRP_MP_FLAG_EXTEND_DOWN) { 790 if (npolicy != lgrp_mem_default_policy) { 791 mpolicy = npolicy; 792 } else { 793 mpolicy = lgrp_mem_policy_default( 794 nseg->s_size + seg->s_size, 795 a->type); 796 } 797 } 798 799 if (mpolicy == npolicy && 800 segvn_extend_next(seg, nseg, a, swresv) == 0) { 801 crfree(cred); 802 ASSERT(nseg->s_szc == 0 || 803 (a->szc == nseg->s_szc && 804 IS_P2ALIGNED(nseg->s_base, pgsz) && 805 IS_P2ALIGNED(nseg->s_size, pgsz))); 806 /* 807 * Communicate out the newly concatenated 808 * segment as part of the result. 
809 */ 810 *segpp = nseg; 811 return (0); 812 } 813 } 814 } 815 816 if (a->vp != NULL) { 817 VN_HOLD(a->vp); 818 if (a->type == MAP_SHARED) 819 lgrp_shm_policy_init(NULL, a->vp); 820 } 821 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 822 823 seg->s_ops = &segvn_ops; 824 seg->s_data = (void *)svd; 825 seg->s_szc = a->szc; 826 827 svd->seg = seg; 828 svd->vp = a->vp; 829 /* 830 * Anonymous mappings have no backing file so the offset is meaningless. 831 */ 832 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0; 833 svd->prot = a->prot; 834 svd->maxprot = a->maxprot; 835 svd->pageprot = 0; 836 svd->type = a->type; 837 svd->vpage = NULL; 838 svd->cred = cred; 839 svd->advice = MADV_NORMAL; 840 svd->pageadvice = 0; 841 svd->flags = (ushort_t)a->flags; 842 svd->softlockcnt = 0; 843 svd->softlockcnt_sbase = 0; 844 svd->softlockcnt_send = 0; 845 svd->svn_inz = 0; 846 svd->rcookie = HAT_INVALID_REGION_COOKIE; 847 svd->pageswap = 0; 848 849 if (a->szc != 0 && a->vp != NULL) { 850 segvn_setvnode_mpss(a->vp); 851 } 852 if (svd->type == MAP_SHARED && svd->vp != NULL && 853 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 854 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 855 segvn_inval_trcache(svd->vp); 856 } 857 858 amp = a->amp; 859 if ((svd->amp = amp) == NULL) { 860 svd->anon_index = 0; 861 if (svd->type == MAP_SHARED) { 862 svd->swresv = 0; 863 /* 864 * Shared mappings to a vp need no other setup. 865 * If we have a shared mapping to an anon_map object 866 * which hasn't been allocated yet, allocate the 867 * struct now so that it will be properly shared 868 * by remembering the swap reservation there. 869 */ 870 if (a->vp == NULL) { 871 svd->amp = anonmap_alloc(seg->s_size, swresv, 872 ANON_SLEEP); 873 svd->amp->a_szc = seg->s_szc; 874 } 875 } else { 876 /* 877 * Private mapping (with or without a vp). 878 * Allocate anon_map when needed. 879 */ 880 svd->swresv = swresv; 881 } 882 } else { 883 pgcnt_t anon_num; 884 885 /* 886 * Mapping to an existing anon_map structure without a vp. 887 * For now we will insure that the segment size isn't larger 888 * than the size - offset gives us. Later on we may wish to 889 * have the anon array dynamically allocated itself so that 890 * we don't always have to allocate all the anon pointer slots. 891 * This of course involves adding extra code to check that we 892 * aren't trying to use an anon pointer slot beyond the end 893 * of the currently allocated anon array. 894 */ 895 if ((amp->size - a->offset) < seg->s_size) { 896 panic("segvn_create anon_map size"); 897 /*NOTREACHED*/ 898 } 899 900 anon_num = btopr(a->offset); 901 902 if (a->type == MAP_SHARED) { 903 /* 904 * SHARED mapping to a given anon_map. 905 */ 906 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 907 amp->refcnt++; 908 if (a->szc > amp->a_szc) { 909 amp->a_szc = a->szc; 910 } 911 ANON_LOCK_EXIT(&->a_rwlock); 912 svd->anon_index = anon_num; 913 svd->swresv = 0; 914 } else { 915 /* 916 * PRIVATE mapping to a given anon_map. 917 * Make sure that all the needed anon 918 * structures are created (so that we will 919 * share the underlying pages if nothing 920 * is written by this mapping) and then 921 * duplicate the anon array as is done 922 * when a privately mapped segment is dup'ed. 
923 */ 924 struct anon *ap; 925 caddr_t addr; 926 caddr_t eaddr; 927 ulong_t anon_idx; 928 int hat_flag = HAT_LOAD; 929 930 if (svd->flags & MAP_TEXT) { 931 hat_flag |= HAT_LOAD_TEXT; 932 } 933 934 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 935 svd->amp->a_szc = seg->s_szc; 936 svd->anon_index = 0; 937 svd->swresv = swresv; 938 939 /* 940 * Prevent 2 threads from allocating anon 941 * slots simultaneously. 942 */ 943 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 944 eaddr = seg->s_base + seg->s_size; 945 946 for (anon_idx = anon_num, addr = seg->s_base; 947 addr < eaddr; addr += PAGESIZE, anon_idx++) { 948 page_t *pp; 949 950 if ((ap = anon_get_ptr(amp->ahp, 951 anon_idx)) != NULL) 952 continue; 953 954 /* 955 * Allocate the anon struct now. 956 * Might as well load up translation 957 * to the page while we're at it... 958 */ 959 pp = anon_zero(seg, addr, &ap, cred); 960 if (ap == NULL || pp == NULL) { 961 panic("segvn_create anon_zero"); 962 /*NOTREACHED*/ 963 } 964 965 /* 966 * Re-acquire the anon_map lock and 967 * initialize the anon array entry. 968 */ 969 ASSERT(anon_get_ptr(amp->ahp, 970 anon_idx) == NULL); 971 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 972 ANON_SLEEP); 973 974 ASSERT(seg->s_szc == 0); 975 ASSERT(!IS_VMODSORT(pp->p_vnode)); 976 977 ASSERT(use_rgn == 0); 978 hat_memload(seg->s_as->a_hat, addr, pp, 979 svd->prot & ~PROT_WRITE, hat_flag); 980 981 page_unlock(pp); 982 } 983 ASSERT(seg->s_szc == 0); 984 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 985 0, seg->s_size); 986 ANON_LOCK_EXIT(&->a_rwlock); 987 } 988 } 989 990 /* 991 * Set default memory allocation policy for segment 992 * 993 * Always set policy for private memory at least for initialization 994 * even if this is a shared memory segment 995 */ 996 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 997 998 if (svd->type == MAP_SHARED) 999 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 1000 svd->vp, svd->offset, seg->s_size); 1001 1002 if (use_rgn) { 1003 ASSERT(!trok); 1004 ASSERT(svd->amp == NULL); 1005 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 1006 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 1007 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 1008 HAT_REGION_TEXT); 1009 } 1010 1011 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 1012 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 1013 1014 return (0); 1015 } 1016 1017 /* 1018 * Concatenate two existing segments, if possible. 1019 * Return 0 on success, -1 if two segments are not compatible 1020 * or -2 on memory allocation failure. 
1021 * If amp_cat == 1 then try and concat segments with anon maps 1022 */ 1023 static int 1024 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 1025 { 1026 struct segvn_data *svd1 = seg1->s_data; 1027 struct segvn_data *svd2 = seg2->s_data; 1028 struct anon_map *amp1 = svd1->amp; 1029 struct anon_map *amp2 = svd2->amp; 1030 struct vpage *vpage1 = svd1->vpage; 1031 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 1032 size_t size, nvpsize; 1033 pgcnt_t npages1, npages2; 1034 1035 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 1036 ASSERT(AS_WRITE_HELD(seg1->s_as)); 1037 ASSERT(seg1->s_ops == seg2->s_ops); 1038 1039 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 1040 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1041 return (-1); 1042 } 1043 1044 /* both segments exist, try to merge them */ 1045 #define incompat(x) (svd1->x != svd2->x) 1046 if (incompat(vp) || incompat(maxprot) || 1047 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 1048 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 1049 incompat(type) || incompat(cred) || incompat(flags) || 1050 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 1051 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0) 1052 return (-1); 1053 #undef incompat 1054 1055 /* 1056 * vp == NULL implies zfod, offset doesn't matter 1057 */ 1058 if (svd1->vp != NULL && 1059 svd1->offset + seg1->s_size != svd2->offset) { 1060 return (-1); 1061 } 1062 1063 /* 1064 * Don't concatenate if either segment uses text replication. 1065 */ 1066 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 1067 return (-1); 1068 } 1069 1070 /* 1071 * Fail early if we're not supposed to concatenate 1072 * segments with non NULL amp. 1073 */ 1074 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) { 1075 return (-1); 1076 } 1077 1078 if (svd1->vp == NULL && svd1->type == MAP_SHARED) { 1079 if (amp1 != amp2) { 1080 return (-1); 1081 } 1082 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) != 1083 svd2->anon_index) { 1084 return (-1); 1085 } 1086 ASSERT(amp1 == NULL || amp1->refcnt >= 2); 1087 } 1088 1089 /* 1090 * If either seg has vpages, create a new merged vpage array. 
1091 */ 1092 if (vpage1 != NULL || vpage2 != NULL) { 1093 struct vpage *vp, *evp; 1094 1095 npages1 = seg_pages(seg1); 1096 npages2 = seg_pages(seg2); 1097 nvpsize = vpgtob(npages1 + npages2); 1098 1099 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) { 1100 return (-2); 1101 } 1102 1103 if (vpage1 != NULL) { 1104 bcopy(vpage1, nvpage, vpgtob(npages1)); 1105 } else { 1106 evp = nvpage + npages1; 1107 for (vp = nvpage; vp < evp; vp++) { 1108 VPP_SETPROT(vp, svd1->prot); 1109 VPP_SETADVICE(vp, svd1->advice); 1110 } 1111 } 1112 1113 if (vpage2 != NULL) { 1114 bcopy(vpage2, nvpage + npages1, vpgtob(npages2)); 1115 } else { 1116 evp = nvpage + npages1 + npages2; 1117 for (vp = nvpage + npages1; vp < evp; vp++) { 1118 VPP_SETPROT(vp, svd2->prot); 1119 VPP_SETADVICE(vp, svd2->advice); 1120 } 1121 } 1122 1123 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) { 1124 ASSERT(svd1->swresv == seg1->s_size); 1125 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1126 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1127 evp = nvpage + npages1; 1128 for (vp = nvpage; vp < evp; vp++) { 1129 VPP_SETSWAPRES(vp); 1130 } 1131 } 1132 1133 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) { 1134 ASSERT(svd2->swresv == seg2->s_size); 1135 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1136 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1137 vp = nvpage + npages1; 1138 evp = vp + npages2; 1139 for (; vp < evp; vp++) { 1140 VPP_SETSWAPRES(vp); 1141 } 1142 } 1143 } 1144 ASSERT((vpage1 != NULL || vpage2 != NULL) || 1145 (svd1->pageswap == 0 && svd2->pageswap == 0)); 1146 1147 /* 1148 * If either segment has private pages, create a new merged anon 1149 * array. If mergeing shared anon segments just decrement anon map's 1150 * refcnt. 1151 */ 1152 if (amp1 != NULL && svd1->type == MAP_SHARED) { 1153 ASSERT(amp1 == amp2 && svd1->vp == NULL); 1154 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1155 ASSERT(amp1->refcnt >= 2); 1156 amp1->refcnt--; 1157 ANON_LOCK_EXIT(&1->a_rwlock); 1158 svd2->amp = NULL; 1159 } else if (amp1 != NULL || amp2 != NULL) { 1160 struct anon_hdr *nahp; 1161 struct anon_map *namp = NULL; 1162 size_t asize; 1163 1164 ASSERT(svd1->type == MAP_PRIVATE); 1165 1166 asize = seg1->s_size + seg2->s_size; 1167 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) { 1168 if (nvpage != NULL) { 1169 kmem_free(nvpage, nvpsize); 1170 } 1171 return (-2); 1172 } 1173 if (amp1 != NULL) { 1174 /* 1175 * XXX anon rwlock is not really needed because 1176 * this is a private segment and we are writers. 
1177 */ 1178 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1179 ASSERT(amp1->refcnt == 1); 1180 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1181 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1182 anon_release(nahp, btop(asize)); 1183 ANON_LOCK_EXIT(&1->a_rwlock); 1184 if (nvpage != NULL) { 1185 kmem_free(nvpage, nvpsize); 1186 } 1187 return (-2); 1188 } 1189 } 1190 if (amp2 != NULL) { 1191 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1192 ASSERT(amp2->refcnt == 1); 1193 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1194 nahp, btop(seg1->s_size), btop(seg2->s_size), 1195 ANON_NOSLEEP)) { 1196 anon_release(nahp, btop(asize)); 1197 ANON_LOCK_EXIT(&2->a_rwlock); 1198 if (amp1 != NULL) { 1199 ANON_LOCK_EXIT(&1->a_rwlock); 1200 } 1201 if (nvpage != NULL) { 1202 kmem_free(nvpage, nvpsize); 1203 } 1204 return (-2); 1205 } 1206 } 1207 if (amp1 != NULL) { 1208 namp = amp1; 1209 anon_release(amp1->ahp, btop(amp1->size)); 1210 } 1211 if (amp2 != NULL) { 1212 if (namp == NULL) { 1213 ASSERT(amp1 == NULL); 1214 namp = amp2; 1215 anon_release(amp2->ahp, btop(amp2->size)); 1216 } else { 1217 amp2->refcnt--; 1218 ANON_LOCK_EXIT(&2->a_rwlock); 1219 anonmap_free(amp2); 1220 } 1221 svd2->amp = NULL; /* needed for seg_free */ 1222 } 1223 namp->ahp = nahp; 1224 namp->size = asize; 1225 svd1->amp = namp; 1226 svd1->anon_index = 0; 1227 ANON_LOCK_EXIT(&namp->a_rwlock); 1228 } 1229 /* 1230 * Now free the old vpage structures. 1231 */ 1232 if (nvpage != NULL) { 1233 if (vpage1 != NULL) { 1234 kmem_free(vpage1, vpgtob(npages1)); 1235 } 1236 if (vpage2 != NULL) { 1237 svd2->vpage = NULL; 1238 kmem_free(vpage2, vpgtob(npages2)); 1239 } 1240 if (svd2->pageprot) { 1241 svd1->pageprot = 1; 1242 } 1243 if (svd2->pageadvice) { 1244 svd1->pageadvice = 1; 1245 } 1246 if (svd2->pageswap) { 1247 svd1->pageswap = 1; 1248 } 1249 svd1->vpage = nvpage; 1250 } 1251 1252 /* all looks ok, merge segments */ 1253 svd1->swresv += svd2->swresv; 1254 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1255 size = seg2->s_size; 1256 seg_free(seg2); 1257 seg1->s_size += size; 1258 return (0); 1259 } 1260 1261 /* 1262 * Extend the previous segment (seg1) to include the 1263 * new segment (seg2 + a), if possible. 1264 * Return 0 on success. 1265 */ 1266 static int 1267 segvn_extend_prev(struct seg *seg1, struct seg *seg2, struct segvn_crargs *a, 1268 size_t swresv) 1269 { 1270 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1271 size_t size; 1272 struct anon_map *amp1; 1273 struct vpage *new_vpage; 1274 1275 /* 1276 * We don't need any segment level locks for "segvn" data 1277 * since the address space is "write" locked. 1278 */ 1279 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as)); 1280 1281 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1282 return (-1); 1283 } 1284 1285 /* second segment is new, try to extend first */ 1286 /* XXX - should also check cred */ 1287 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1288 (!svd1->pageprot && (svd1->prot != a->prot)) || 1289 svd1->type != a->type || svd1->flags != a->flags || 1290 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0) 1291 return (-1); 1292 1293 /* vp == NULL implies zfod, offset doesn't matter */ 1294 if (svd1->vp != NULL && 1295 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1296 return (-1); 1297 1298 if (svd1->tr_state != SEGVN_TR_OFF) { 1299 return (-1); 1300 } 1301 1302 amp1 = svd1->amp; 1303 if (amp1) { 1304 pgcnt_t newpgs; 1305 1306 /* 1307 * Segment has private pages, can data structures 1308 * be expanded? 
1309 * 1310 * Acquire the anon_map lock to prevent it from changing, 1311 * if it is shared. This ensures that the anon_map 1312 * will not change while a thread which has a read/write 1313 * lock on an address space references it. 1314 * XXX - Don't need the anon_map lock at all if "refcnt" 1315 * is 1. 1316 * 1317 * Can't grow a MAP_SHARED segment with an anonmap because 1318 * there may be existing anon slots where we want to extend 1319 * the segment and we wouldn't know what to do with them 1320 * (e.g., for tmpfs right thing is to just leave them there, 1321 * for /dev/zero they should be cleared out). 1322 */ 1323 if (svd1->type == MAP_SHARED) 1324 return (-1); 1325 1326 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1327 if (amp1->refcnt > 1) { 1328 ANON_LOCK_EXIT(&1->a_rwlock); 1329 return (-1); 1330 } 1331 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1332 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1333 1334 if (newpgs == 0) { 1335 ANON_LOCK_EXIT(&1->a_rwlock); 1336 return (-1); 1337 } 1338 amp1->size = ptob(newpgs); 1339 ANON_LOCK_EXIT(&1->a_rwlock); 1340 } 1341 if (svd1->vpage != NULL) { 1342 struct vpage *vp, *evp; 1343 new_vpage = 1344 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1345 KM_NOSLEEP); 1346 if (new_vpage == NULL) 1347 return (-1); 1348 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1349 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1350 svd1->vpage = new_vpage; 1351 1352 vp = new_vpage + seg_pages(seg1); 1353 evp = vp + seg_pages(seg2); 1354 for (; vp < evp; vp++) 1355 VPP_SETPROT(vp, a->prot); 1356 if (svd1->pageswap && swresv) { 1357 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1358 ASSERT(swresv == seg2->s_size); 1359 vp = new_vpage + seg_pages(seg1); 1360 for (; vp < evp; vp++) { 1361 VPP_SETSWAPRES(vp); 1362 } 1363 } 1364 } 1365 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0); 1366 size = seg2->s_size; 1367 seg_free(seg2); 1368 seg1->s_size += size; 1369 svd1->swresv += swresv; 1370 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1371 svd1->type == MAP_SHARED && svd1->vp != NULL && 1372 (svd1->vp->v_flag & VVMEXEC)) { 1373 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1374 segvn_inval_trcache(svd1->vp); 1375 } 1376 return (0); 1377 } 1378 1379 /* 1380 * Extend the next segment (seg2) to include the 1381 * new segment (seg1 + a), if possible. 1382 * Return 0 on success. 1383 */ 1384 static int 1385 segvn_extend_next(struct seg *seg1, struct seg *seg2, struct segvn_crargs *a, 1386 size_t swresv) 1387 { 1388 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1389 size_t size; 1390 struct anon_map *amp2; 1391 struct vpage *new_vpage; 1392 1393 /* 1394 * We don't need any segment level locks for "segvn" data 1395 * since the address space is "write" locked. 
1396 */ 1397 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as)); 1398 1399 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1400 return (-1); 1401 } 1402 1403 /* first segment is new, try to extend second */ 1404 /* XXX - should also check cred */ 1405 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1406 (!svd2->pageprot && (svd2->prot != a->prot)) || 1407 svd2->type != a->type || svd2->flags != a->flags || 1408 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0) 1409 return (-1); 1410 /* vp == NULL implies zfod, offset doesn't matter */ 1411 if (svd2->vp != NULL && 1412 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1413 return (-1); 1414 1415 if (svd2->tr_state != SEGVN_TR_OFF) { 1416 return (-1); 1417 } 1418 1419 amp2 = svd2->amp; 1420 if (amp2) { 1421 pgcnt_t newpgs; 1422 1423 /* 1424 * Segment has private pages, can data structures 1425 * be expanded? 1426 * 1427 * Acquire the anon_map lock to prevent it from changing, 1428 * if it is shared. This ensures that the anon_map 1429 * will not change while a thread which has a read/write 1430 * lock on an address space references it. 1431 * 1432 * XXX - Don't need the anon_map lock at all if "refcnt" 1433 * is 1. 1434 */ 1435 if (svd2->type == MAP_SHARED) 1436 return (-1); 1437 1438 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1439 if (amp2->refcnt > 1) { 1440 ANON_LOCK_EXIT(&2->a_rwlock); 1441 return (-1); 1442 } 1443 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1444 btop(seg2->s_size), btop(seg1->s_size), 1445 ANON_NOSLEEP | ANON_GROWDOWN); 1446 1447 if (newpgs == 0) { 1448 ANON_LOCK_EXIT(&2->a_rwlock); 1449 return (-1); 1450 } 1451 amp2->size = ptob(newpgs); 1452 ANON_LOCK_EXIT(&2->a_rwlock); 1453 } 1454 if (svd2->vpage != NULL) { 1455 struct vpage *vp, *evp; 1456 new_vpage = 1457 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1458 KM_NOSLEEP); 1459 if (new_vpage == NULL) { 1460 /* Not merging segments so adjust anon_index back */ 1461 if (amp2) 1462 svd2->anon_index += seg_pages(seg1); 1463 return (-1); 1464 } 1465 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1466 vpgtob(seg_pages(seg2))); 1467 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1468 svd2->vpage = new_vpage; 1469 1470 vp = new_vpage; 1471 evp = vp + seg_pages(seg1); 1472 for (; vp < evp; vp++) 1473 VPP_SETPROT(vp, a->prot); 1474 if (svd2->pageswap && swresv) { 1475 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1476 ASSERT(swresv == seg1->s_size); 1477 vp = new_vpage; 1478 for (; vp < evp; vp++) { 1479 VPP_SETSWAPRES(vp); 1480 } 1481 } 1482 } 1483 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0); 1484 size = seg1->s_size; 1485 seg_free(seg1); 1486 seg2->s_size += size; 1487 seg2->s_base -= size; 1488 svd2->offset -= size; 1489 svd2->swresv += swresv; 1490 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1491 svd2->type == MAP_SHARED && svd2->vp != NULL && 1492 (svd2->vp->v_flag & VVMEXEC)) { 1493 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1494 segvn_inval_trcache(svd2->vp); 1495 } 1496 return (0); 1497 } 1498 1499 /* 1500 * Duplicate all the pages in the segment. This may break COW sharing for a 1501 * given page. If the page is marked with inherit zero set, then instead of 1502 * duplicating the page, we zero the page. 
1503 */ 1504 static int 1505 segvn_dup_pages(struct seg *seg, struct seg *newseg) 1506 { 1507 int error; 1508 uint_t prot; 1509 page_t *pp; 1510 struct anon *ap, *newap; 1511 size_t i; 1512 caddr_t addr; 1513 1514 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1515 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data; 1516 ulong_t old_idx = svd->anon_index; 1517 ulong_t new_idx = 0; 1518 1519 i = btopr(seg->s_size); 1520 addr = seg->s_base; 1521 1522 /* 1523 * XXX break cow sharing using PAGESIZE 1524 * pages. They will be relocated into larger 1525 * pages at fault time. 1526 */ 1527 while (i-- > 0) { 1528 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) { 1529 struct vpage *vpp; 1530 1531 vpp = &svd->vpage[seg_page(seg, addr)]; 1532 1533 /* 1534 * prot need not be computed below 'cause anon_private 1535 * is going to ignore it anyway as child doesn't inherit 1536 * pagelock from parent. 1537 */ 1538 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot; 1539 1540 /* 1541 * Check whether we should zero this or dup it. 1542 */ 1543 if (svd->svn_inz == SEGVN_INZ_ALL || 1544 (svd->svn_inz == SEGVN_INZ_VPP && 1545 VPP_ISINHZERO(vpp))) { 1546 pp = anon_zero(newseg, addr, &newap, 1547 newsvd->cred); 1548 } else { 1549 page_t *anon_pl[1+1]; 1550 uint_t vpprot; 1551 error = anon_getpage(&ap, &vpprot, anon_pl, 1552 PAGESIZE, seg, addr, S_READ, svd->cred); 1553 if (error != 0) 1554 return (error); 1555 1556 pp = anon_private(&newap, newseg, addr, prot, 1557 anon_pl[0], 0, newsvd->cred); 1558 } 1559 if (pp == NULL) { 1560 return (ENOMEM); 1561 } 1562 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap, 1563 ANON_SLEEP); 1564 page_unlock(pp); 1565 } 1566 addr += PAGESIZE; 1567 old_idx++; 1568 new_idx++; 1569 } 1570 1571 return (0); 1572 } 1573 1574 static int 1575 segvn_dup(struct seg *seg, struct seg *newseg) 1576 { 1577 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1578 struct segvn_data *newsvd; 1579 pgcnt_t npages = seg_pages(seg); 1580 int error = 0; 1581 size_t len; 1582 struct anon_map *amp; 1583 1584 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); 1585 ASSERT(newseg->s_as->a_proc->p_parent == curproc); 1586 1587 /* 1588 * If segment has anon reserved, reserve more for the new seg. 1589 * For a MAP_NORESERVE segment swresv will be a count of all the 1590 * allocated anon slots; thus we reserve for the child as many slots 1591 * as the parent has allocated. This semantic prevents the child or 1592 * parent from dieing during a copy-on-write fault caused by trying 1593 * to write a shared pre-existing anon page. 
1594 */ 1595 if ((len = svd->swresv) != 0) { 1596 if (anon_resv(svd->swresv) == 0) 1597 return (ENOMEM); 1598 1599 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1600 seg, len, 0); 1601 } 1602 1603 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1604 1605 newseg->s_ops = &segvn_ops; 1606 newseg->s_data = (void *)newsvd; 1607 newseg->s_szc = seg->s_szc; 1608 1609 newsvd->seg = newseg; 1610 if ((newsvd->vp = svd->vp) != NULL) { 1611 VN_HOLD(svd->vp); 1612 if (svd->type == MAP_SHARED) 1613 lgrp_shm_policy_init(NULL, svd->vp); 1614 } 1615 newsvd->offset = svd->offset; 1616 newsvd->prot = svd->prot; 1617 newsvd->maxprot = svd->maxprot; 1618 newsvd->pageprot = svd->pageprot; 1619 newsvd->type = svd->type; 1620 newsvd->cred = svd->cred; 1621 crhold(newsvd->cred); 1622 newsvd->advice = svd->advice; 1623 newsvd->pageadvice = svd->pageadvice; 1624 newsvd->svn_inz = svd->svn_inz; 1625 newsvd->swresv = svd->swresv; 1626 newsvd->pageswap = svd->pageswap; 1627 newsvd->flags = svd->flags; 1628 newsvd->softlockcnt = 0; 1629 newsvd->softlockcnt_sbase = 0; 1630 newsvd->softlockcnt_send = 0; 1631 newsvd->policy_info = svd->policy_info; 1632 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1633 1634 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1635 /* 1636 * Not attaching to a shared anon object. 1637 */ 1638 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1639 svd->tr_state == SEGVN_TR_OFF); 1640 if (svd->tr_state == SEGVN_TR_ON) { 1641 ASSERT(newsvd->vp != NULL && amp != NULL); 1642 newsvd->tr_state = SEGVN_TR_INIT; 1643 } else { 1644 newsvd->tr_state = svd->tr_state; 1645 } 1646 newsvd->amp = NULL; 1647 newsvd->anon_index = 0; 1648 } else { 1649 /* regions for now are only used on pure vnode segments */ 1650 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1651 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1652 newsvd->tr_state = SEGVN_TR_OFF; 1653 if (svd->type == MAP_SHARED) { 1654 ASSERT(svd->svn_inz == SEGVN_INZ_NONE); 1655 newsvd->amp = amp; 1656 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1657 amp->refcnt++; 1658 ANON_LOCK_EXIT(&->a_rwlock); 1659 newsvd->anon_index = svd->anon_index; 1660 } else { 1661 int reclaim = 1; 1662 1663 /* 1664 * Allocate and initialize new anon_map structure. 1665 */ 1666 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1667 ANON_SLEEP); 1668 newsvd->amp->a_szc = newseg->s_szc; 1669 newsvd->anon_index = 0; 1670 ASSERT(svd->svn_inz == SEGVN_INZ_NONE || 1671 svd->svn_inz == SEGVN_INZ_ALL || 1672 svd->svn_inz == SEGVN_INZ_VPP); 1673 1674 /* 1675 * We don't have to acquire the anon_map lock 1676 * for the new segment (since it belongs to an 1677 * address space that is still not associated 1678 * with any process), or the segment in the old 1679 * address space (since all threads in it 1680 * are stopped while duplicating the address space). 1681 */ 1682 1683 /* 1684 * The goal of the following code is to make sure that 1685 * softlocked pages do not end up as copy on write 1686 * pages. This would cause problems where one 1687 * thread writes to a page that is COW and a different 1688 * thread in the same process has softlocked it. The 1689 * softlock lock would move away from this process 1690 * because the write would cause this process to get 1691 * a copy (without the softlock). 1692 * 1693 * The strategy here is to just break the 1694 * sharing on pages that could possibly be 1695 * softlocked. 
1696 * 1697 * In addition, if any pages have been marked that they 1698 * should be inherited as zero, then we immediately go 1699 * ahead and break COW and zero them. In the case of a 1700 * softlocked page that should be inherited zero, we 1701 * break COW and just get a zero page. 1702 */ 1703 retry: 1704 if (svd->softlockcnt || 1705 svd->svn_inz != SEGVN_INZ_NONE) { 1706 /* 1707 * The softlock count might be non zero 1708 * because some pages are still stuck in the 1709 * cache for lazy reclaim. Flush the cache 1710 * now. This should drop the count to zero. 1711 * [or there is really I/O going on to these 1712 * pages]. Note, we have the writers lock so 1713 * nothing gets inserted during the flush. 1714 */ 1715 if (svd->softlockcnt && reclaim == 1) { 1716 segvn_purge(seg); 1717 reclaim = 0; 1718 goto retry; 1719 } 1720 1721 error = segvn_dup_pages(seg, newseg); 1722 if (error != 0) { 1723 newsvd->vpage = NULL; 1724 goto out; 1725 } 1726 } else { /* common case */ 1727 if (seg->s_szc != 0) { 1728 /* 1729 * If at least one of anon slots of a 1730 * large page exists then make sure 1731 * all anon slots of a large page 1732 * exist to avoid partial cow sharing 1733 * of a large page in the future. 1734 */ 1735 anon_dup_fill_holes(amp->ahp, 1736 svd->anon_index, newsvd->amp->ahp, 1737 0, seg->s_size, seg->s_szc, 1738 svd->vp != NULL); 1739 } else { 1740 anon_dup(amp->ahp, svd->anon_index, 1741 newsvd->amp->ahp, 0, seg->s_size); 1742 } 1743 1744 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1745 seg->s_size, PROT_WRITE); 1746 } 1747 } 1748 } 1749 /* 1750 * If necessary, create a vpage structure for the new segment. 1751 * Do not copy any page lock indications. 1752 */ 1753 if (svd->vpage != NULL) { 1754 uint_t i; 1755 struct vpage *ovp = svd->vpage; 1756 struct vpage *nvp; 1757 1758 nvp = newsvd->vpage = 1759 kmem_alloc(vpgtob(npages), KM_SLEEP); 1760 for (i = 0; i < npages; i++) { 1761 *nvp = *ovp++; 1762 VPP_CLRPPLOCK(nvp++); 1763 } 1764 } else 1765 newsvd->vpage = NULL; 1766 1767 /* Inform the vnode of the new mapping */ 1768 if (newsvd->vp != NULL) { 1769 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1770 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1771 newsvd->maxprot, newsvd->type, newsvd->cred, NULL); 1772 } 1773 out: 1774 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1775 ASSERT(newsvd->amp == NULL); 1776 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1777 newsvd->rcookie = svd->rcookie; 1778 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1779 } 1780 return (error); 1781 } 1782 1783 1784 /* 1785 * callback function to invoke free_vp_pages() for only those pages actually 1786 * processed by the HAT when a shared region is destroyed. 
1787 */ 1788 extern int free_pages; 1789 1790 static void 1791 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1792 size_t r_size, void *r_obj, u_offset_t r_objoff) 1793 { 1794 u_offset_t off; 1795 size_t len; 1796 vnode_t *vp = (vnode_t *)r_obj; 1797 1798 ASSERT(eaddr > saddr); 1799 ASSERT(saddr >= r_saddr); 1800 ASSERT(saddr < r_saddr + r_size); 1801 ASSERT(eaddr > r_saddr); 1802 ASSERT(eaddr <= r_saddr + r_size); 1803 ASSERT(vp != NULL); 1804 1805 if (!free_pages) { 1806 return; 1807 } 1808 1809 len = eaddr - saddr; 1810 off = (saddr - r_saddr) + r_objoff; 1811 free_vp_pages(vp, off, len); 1812 } 1813 1814 /* 1815 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1816 * those pages actually processed by the HAT 1817 */ 1818 static void 1819 segvn_hat_unload_callback(hat_callback_t *cb) 1820 { 1821 struct seg *seg = cb->hcb_data; 1822 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1823 size_t len; 1824 u_offset_t off; 1825 1826 ASSERT(svd->vp != NULL); 1827 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1828 ASSERT(cb->hcb_start_addr >= seg->s_base); 1829 1830 len = cb->hcb_end_addr - cb->hcb_start_addr; 1831 off = cb->hcb_start_addr - seg->s_base; 1832 free_vp_pages(svd->vp, svd->offset + off, len); 1833 } 1834 1835 /* 1836 * This function determines the number of bytes of swap reserved by 1837 * a segment for which per-page accounting is present. It is used to 1838 * calculate the correct value of a segvn_data's swresv. 1839 */ 1840 static size_t 1841 segvn_count_swap_by_vpages(struct seg *seg) 1842 { 1843 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1844 struct vpage *vp, *evp; 1845 size_t nswappages = 0; 1846 1847 ASSERT(svd->pageswap); 1848 ASSERT(svd->vpage != NULL); 1849 1850 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 1851 1852 for (vp = svd->vpage; vp < evp; vp++) { 1853 if (VPP_ISSWAPRES(vp)) 1854 nswappages++; 1855 } 1856 1857 return (nswappages << PAGESHIFT); 1858 } 1859 1860 static int 1861 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1862 { 1863 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1864 struct segvn_data *nsvd; 1865 struct seg *nseg; 1866 struct anon_map *amp; 1867 pgcnt_t opages; /* old segment size in pages */ 1868 pgcnt_t npages; /* new segment size in pages */ 1869 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1870 hat_callback_t callback; /* used for free_vp_pages() */ 1871 hat_callback_t *cbp = NULL; 1872 caddr_t nbase; 1873 size_t nsize; 1874 size_t oswresv; 1875 int reclaim = 1; 1876 1877 /* 1878 * We don't need any segment level locks for "segvn" data 1879 * since the address space is "write" locked. 1880 */ 1881 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); 1882 1883 /* 1884 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1885 * softlockcnt is protected from change by the as write lock. 1886 */ 1887 retry: 1888 if (svd->softlockcnt > 0) { 1889 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1890 1891 /* 1892 * If this is shared segment non 0 softlockcnt 1893 * means locked pages are still in use. 1894 */ 1895 if (svd->type == MAP_SHARED) { 1896 return (EAGAIN); 1897 } 1898 1899 /* 1900 * since we do have the writers lock nobody can fill 1901 * the cache during the purge. The flush either succeeds 1902 * or we still have pending I/Os. 
1903 */ 1904 if (reclaim == 1) { 1905 segvn_purge(seg); 1906 reclaim = 0; 1907 goto retry; 1908 } 1909 return (EAGAIN); 1910 } 1911 1912 /* 1913 * Check for bad sizes 1914 */ 1915 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1916 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1917 panic("segvn_unmap"); 1918 /*NOTREACHED*/ 1919 } 1920 1921 if (seg->s_szc != 0) { 1922 size_t pgsz = page_get_pagesize(seg->s_szc); 1923 int err; 1924 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1925 ASSERT(seg->s_base != addr || seg->s_size != len); 1926 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1927 ASSERT(svd->amp == NULL); 1928 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1929 hat_leave_region(seg->s_as->a_hat, 1930 svd->rcookie, HAT_REGION_TEXT); 1931 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1932 /* 1933 * could pass a flag to segvn_demote_range() 1934 * below to tell it not to do any unloads but 1935 * this case is rare enough to not bother for 1936 * now. 1937 */ 1938 } else if (svd->tr_state == SEGVN_TR_INIT) { 1939 svd->tr_state = SEGVN_TR_OFF; 1940 } else if (svd->tr_state == SEGVN_TR_ON) { 1941 ASSERT(svd->amp != NULL); 1942 segvn_textunrepl(seg, 1); 1943 ASSERT(svd->amp == NULL); 1944 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1945 } 1946 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1947 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1948 if (err == 0) { 1949 return (IE_RETRY); 1950 } 1951 return (err); 1952 } 1953 } 1954 1955 /* Inform the vnode of the unmapping. */ 1956 if (svd->vp) { 1957 int error; 1958 1959 error = VOP_DELMAP(svd->vp, 1960 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1961 seg->s_as, addr, len, svd->prot, svd->maxprot, 1962 svd->type, svd->cred, NULL); 1963 1964 if (error == EAGAIN) 1965 return (error); 1966 } 1967 1968 /* 1969 * Remove any page locks set through this mapping. 1970 * If text replication is not off no page locks could have been 1971 * established via this mapping. 1972 */ 1973 if (svd->tr_state == SEGVN_TR_OFF) { 1974 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1975 } 1976 1977 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1978 ASSERT(svd->amp == NULL); 1979 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1980 ASSERT(svd->type == MAP_PRIVATE); 1981 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1982 HAT_REGION_TEXT); 1983 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1984 } else if (svd->tr_state == SEGVN_TR_ON) { 1985 ASSERT(svd->amp != NULL); 1986 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1987 segvn_textunrepl(seg, 1); 1988 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1989 } else { 1990 if (svd->tr_state != SEGVN_TR_OFF) { 1991 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1992 svd->tr_state = SEGVN_TR_OFF; 1993 } 1994 /* 1995 * Unload any hardware translations in the range to be taken 1996 * out. Use a callback to invoke free_vp_pages() effectively. 
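 * The callback is only set up for vnode backed segments (and only when the
 * free_pages tunable is enabled); otherwise there are no file pages to free.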
1997 */ 1998 if (svd->vp != NULL && free_pages != 0) { 1999 callback.hcb_data = seg; 2000 callback.hcb_function = segvn_hat_unload_callback; 2001 cbp = &callback; 2002 } 2003 hat_unload_callback(seg->s_as->a_hat, addr, len, 2004 HAT_UNLOAD_UNMAP, cbp); 2005 2006 if (svd->type == MAP_SHARED && svd->vp != NULL && 2007 (svd->vp->v_flag & VVMEXEC) && 2008 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 2009 segvn_inval_trcache(svd->vp); 2010 } 2011 } 2012 2013 /* 2014 * Check for entire segment 2015 */ 2016 if (addr == seg->s_base && len == seg->s_size) { 2017 seg_free(seg); 2018 return (0); 2019 } 2020 2021 opages = seg_pages(seg); 2022 dpages = btop(len); 2023 npages = opages - dpages; 2024 amp = svd->amp; 2025 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 2026 2027 /* 2028 * Check for beginning of segment 2029 */ 2030 if (addr == seg->s_base) { 2031 if (svd->vpage != NULL) { 2032 size_t nbytes; 2033 struct vpage *ovpage; 2034 2035 ovpage = svd->vpage; /* keep pointer to vpage */ 2036 2037 nbytes = vpgtob(npages); 2038 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2039 bcopy(&ovpage[dpages], svd->vpage, nbytes); 2040 2041 /* free up old vpage */ 2042 kmem_free(ovpage, vpgtob(opages)); 2043 } 2044 if (amp != NULL) { 2045 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2046 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2047 /* 2048 * Shared anon map is no longer in use. Before 2049 * freeing its pages purge all entries from 2050 * pcache that belong to this amp. 2051 */ 2052 if (svd->type == MAP_SHARED) { 2053 ASSERT(amp->refcnt == 1); 2054 ASSERT(svd->softlockcnt == 0); 2055 anonmap_purge(amp); 2056 } 2057 /* 2058 * Free up now unused parts of anon_map array. 2059 */ 2060 if (amp->a_szc == seg->s_szc) { 2061 if (seg->s_szc != 0) { 2062 anon_free_pages(amp->ahp, 2063 svd->anon_index, len, 2064 seg->s_szc); 2065 } else { 2066 anon_free(amp->ahp, 2067 svd->anon_index, 2068 len); 2069 } 2070 } else { 2071 ASSERT(svd->type == MAP_SHARED); 2072 ASSERT(amp->a_szc > seg->s_szc); 2073 anon_shmap_free_pages(amp, 2074 svd->anon_index, len); 2075 } 2076 2077 /* 2078 * Unreserve swap space for the 2079 * unmapped chunk of this segment in 2080 * case it's MAP_SHARED 2081 */ 2082 if (svd->type == MAP_SHARED) { 2083 anon_unresv_zone(len, 2084 seg->s_as->a_proc->p_zone); 2085 amp->swresv -= len; 2086 } 2087 } 2088 ANON_LOCK_EXIT(&->a_rwlock); 2089 svd->anon_index += dpages; 2090 } 2091 if (svd->vp != NULL) 2092 svd->offset += len; 2093 2094 seg->s_base += len; 2095 seg->s_size -= len; 2096 2097 if (svd->swresv) { 2098 if (svd->flags & MAP_NORESERVE) { 2099 ASSERT(amp); 2100 oswresv = svd->swresv; 2101 2102 svd->swresv = ptob(anon_pages(amp->ahp, 2103 svd->anon_index, npages)); 2104 anon_unresv_zone(oswresv - svd->swresv, 2105 seg->s_as->a_proc->p_zone); 2106 if (SEG_IS_PARTIAL_RESV(seg)) 2107 seg->s_as->a_resvsize -= oswresv - 2108 svd->swresv; 2109 } else { 2110 size_t unlen; 2111 2112 if (svd->pageswap) { 2113 oswresv = svd->swresv; 2114 svd->swresv = 2115 segvn_count_swap_by_vpages(seg); 2116 ASSERT(oswresv >= svd->swresv); 2117 unlen = oswresv - svd->swresv; 2118 } else { 2119 svd->swresv -= len; 2120 ASSERT(svd->swresv == seg->s_size); 2121 unlen = len; 2122 } 2123 anon_unresv_zone(unlen, 2124 seg->s_as->a_proc->p_zone); 2125 } 2126 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2127 seg, len, 0); 2128 } 2129 2130 return (0); 2131 } 2132 2133 /* 2134 * Check for end of segment 2135 */ 2136 if (addr + len == seg->s_base + seg->s_size) { 2137 if (svd->vpage != NULL) { 2138 size_t nbytes; 2139 struct 
vpage *ovpage; 2140 2141 ovpage = svd->vpage; /* keep pointer to vpage */ 2142 2143 nbytes = vpgtob(npages); 2144 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2145 bcopy(ovpage, svd->vpage, nbytes); 2146 2147 /* free up old vpage */ 2148 kmem_free(ovpage, vpgtob(opages)); 2149 2150 } 2151 if (amp != NULL) { 2152 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2153 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2154 /* 2155 * Free up now unused parts of anon_map array. 2156 */ 2157 ulong_t an_idx = svd->anon_index + npages; 2158 2159 /* 2160 * Shared anon map is no longer in use. Before 2161 * freeing its pages purge all entries from 2162 * pcache that belong to this amp. 2163 */ 2164 if (svd->type == MAP_SHARED) { 2165 ASSERT(amp->refcnt == 1); 2166 ASSERT(svd->softlockcnt == 0); 2167 anonmap_purge(amp); 2168 } 2169 2170 if (amp->a_szc == seg->s_szc) { 2171 if (seg->s_szc != 0) { 2172 anon_free_pages(amp->ahp, 2173 an_idx, len, 2174 seg->s_szc); 2175 } else { 2176 anon_free(amp->ahp, an_idx, 2177 len); 2178 } 2179 } else { 2180 ASSERT(svd->type == MAP_SHARED); 2181 ASSERT(amp->a_szc > seg->s_szc); 2182 anon_shmap_free_pages(amp, 2183 an_idx, len); 2184 } 2185 2186 /* 2187 * Unreserve swap space for the 2188 * unmapped chunk of this segment in 2189 * case it's MAP_SHARED 2190 */ 2191 if (svd->type == MAP_SHARED) { 2192 anon_unresv_zone(len, 2193 seg->s_as->a_proc->p_zone); 2194 amp->swresv -= len; 2195 } 2196 } 2197 ANON_LOCK_EXIT(&->a_rwlock); 2198 } 2199 2200 seg->s_size -= len; 2201 2202 if (svd->swresv) { 2203 if (svd->flags & MAP_NORESERVE) { 2204 ASSERT(amp); 2205 oswresv = svd->swresv; 2206 svd->swresv = ptob(anon_pages(amp->ahp, 2207 svd->anon_index, npages)); 2208 anon_unresv_zone(oswresv - svd->swresv, 2209 seg->s_as->a_proc->p_zone); 2210 if (SEG_IS_PARTIAL_RESV(seg)) 2211 seg->s_as->a_resvsize -= oswresv - 2212 svd->swresv; 2213 } else { 2214 size_t unlen; 2215 2216 if (svd->pageswap) { 2217 oswresv = svd->swresv; 2218 svd->swresv = 2219 segvn_count_swap_by_vpages(seg); 2220 ASSERT(oswresv >= svd->swresv); 2221 unlen = oswresv - svd->swresv; 2222 } else { 2223 svd->swresv -= len; 2224 ASSERT(svd->swresv == seg->s_size); 2225 unlen = len; 2226 } 2227 anon_unresv_zone(unlen, 2228 seg->s_as->a_proc->p_zone); 2229 } 2230 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2231 "anon proc:%p %lu %u", seg, len, 0); 2232 } 2233 2234 return (0); 2235 } 2236 2237 /* 2238 * The section to go is in the middle of the segment, 2239 * have to make it into two segments. nseg is made for 2240 * the high end while seg is cut down at the low end. 
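 * The vpage array, the anon map and any swap reservation are split
 * between the two resulting segments as well.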
 */
	nbase = addr + len;				/* new seg base */
	nsize = (seg->s_base + seg->s_size) - nbase;	/* new seg size */
	seg->s_size = addr - seg->s_base;		/* shrink old seg */
	nseg = seg_alloc(seg->s_as, nbase, nsize);
	if (nseg == NULL) {
		panic("segvn_unmap seg_alloc");
		/*NOTREACHED*/
	}
	nseg->s_ops = seg->s_ops;
	nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
	nseg->s_data = (void *)nsvd;
	nseg->s_szc = seg->s_szc;
	*nsvd = *svd;
	nsvd->seg = nseg;
	nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
	nsvd->swresv = 0;
	nsvd->softlockcnt = 0;
	nsvd->softlockcnt_sbase = 0;
	nsvd->softlockcnt_send = 0;
	nsvd->svn_inz = svd->svn_inz;
	ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);

	if (svd->vp != NULL) {
		VN_HOLD(nsvd->vp);
		if (nsvd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, nsvd->vp);
	}
	crhold(svd->cred);

	if (svd->vpage == NULL) {
		nsvd->vpage = NULL;
	} else {
		/* need to split vpage into two arrays */
		size_t nbytes;
		struct vpage *ovpage;

		ovpage = svd->vpage;		/* keep pointer to vpage */

		npages = seg_pages(seg);	/* seg has shrunk */
		nbytes = vpgtob(npages);
		svd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(ovpage, svd->vpage, nbytes);

		npages = seg_pages(nseg);
		nbytes = vpgtob(npages);
		nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);

		/* free up old vpage */
		kmem_free(ovpage, vpgtob(opages));
	}

	if (amp == NULL) {
		nsvd->amp = NULL;
		nsvd->anon_index = 0;
	} else {
		/*
		 * Need to create a new anon map for the new segment.
		 * We'll also allocate a new smaller array for the old
		 * smaller segment to save space.
		 */
		opages = btop((uintptr_t)(addr - seg->s_base));
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
			/*
			 * Free up now unused parts of anon_map array.
			 */
			ulong_t an_idx = svd->anon_index + opages;

			/*
			 * Shared anon map is no longer in use. Before
			 * freeing its pages purge all entries from
			 * pcache that belong to this amp.
2317 */ 2318 if (svd->type == MAP_SHARED) { 2319 ASSERT(amp->refcnt == 1); 2320 ASSERT(svd->softlockcnt == 0); 2321 anonmap_purge(amp); 2322 } 2323 2324 if (amp->a_szc == seg->s_szc) { 2325 if (seg->s_szc != 0) { 2326 anon_free_pages(amp->ahp, an_idx, len, 2327 seg->s_szc); 2328 } else { 2329 anon_free(amp->ahp, an_idx, 2330 len); 2331 } 2332 } else { 2333 ASSERT(svd->type == MAP_SHARED); 2334 ASSERT(amp->a_szc > seg->s_szc); 2335 anon_shmap_free_pages(amp, an_idx, len); 2336 } 2337 2338 /* 2339 * Unreserve swap space for the 2340 * unmapped chunk of this segment in 2341 * case it's MAP_SHARED 2342 */ 2343 if (svd->type == MAP_SHARED) { 2344 anon_unresv_zone(len, 2345 seg->s_as->a_proc->p_zone); 2346 amp->swresv -= len; 2347 } 2348 } 2349 nsvd->anon_index = svd->anon_index + 2350 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2351 if (svd->type == MAP_SHARED) { 2352 amp->refcnt++; 2353 nsvd->amp = amp; 2354 } else { 2355 struct anon_map *namp; 2356 struct anon_hdr *nahp; 2357 2358 ASSERT(svd->type == MAP_PRIVATE); 2359 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2360 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2361 namp->a_szc = seg->s_szc; 2362 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2363 0, btop(seg->s_size), ANON_SLEEP); 2364 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2365 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2366 anon_release(amp->ahp, btop(amp->size)); 2367 svd->anon_index = 0; 2368 nsvd->anon_index = 0; 2369 amp->ahp = nahp; 2370 amp->size = seg->s_size; 2371 nsvd->amp = namp; 2372 } 2373 ANON_LOCK_EXIT(&->a_rwlock); 2374 } 2375 if (svd->swresv) { 2376 if (svd->flags & MAP_NORESERVE) { 2377 ASSERT(amp); 2378 oswresv = svd->swresv; 2379 svd->swresv = ptob(anon_pages(amp->ahp, 2380 svd->anon_index, btop(seg->s_size))); 2381 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2382 nsvd->anon_index, btop(nseg->s_size))); 2383 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2384 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2385 seg->s_as->a_proc->p_zone); 2386 if (SEG_IS_PARTIAL_RESV(seg)) 2387 seg->s_as->a_resvsize -= oswresv - 2388 (svd->swresv + nsvd->swresv); 2389 } else { 2390 size_t unlen; 2391 2392 if (svd->pageswap) { 2393 oswresv = svd->swresv; 2394 svd->swresv = segvn_count_swap_by_vpages(seg); 2395 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2396 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2397 unlen = oswresv - (svd->swresv + nsvd->swresv); 2398 } else { 2399 if (seg->s_size + nseg->s_size + len != 2400 svd->swresv) { 2401 panic("segvn_unmap: cannot split " 2402 "swap reservation"); 2403 /*NOTREACHED*/ 2404 } 2405 svd->swresv = seg->s_size; 2406 nsvd->swresv = nseg->s_size; 2407 unlen = len; 2408 } 2409 anon_unresv_zone(unlen, 2410 seg->s_as->a_proc->p_zone); 2411 } 2412 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2413 seg, len, 0); 2414 } 2415 2416 return (0); /* I'm glad that's all over with! */ 2417 } 2418 2419 static void 2420 segvn_free(struct seg *seg) 2421 { 2422 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2423 pgcnt_t npages = seg_pages(seg); 2424 struct anon_map *amp; 2425 size_t len; 2426 2427 /* 2428 * We don't need any segment level locks for "segvn" data 2429 * since the address space is "write" locked. 2430 */ 2431 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); 2432 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2433 2434 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2435 2436 /* 2437 * Be sure to unlock pages. XXX Why do things get free'ed instead 2438 * of unmapped? 
XXX
 */
	(void) segvn_lockop(seg, seg->s_base, seg->s_size,
	    0, MC_UNLOCK, NULL, 0);

	/*
	 * Deallocate the vpage and anon pointers if necessary and possible.
	 */
	if (svd->vpage != NULL) {
		kmem_free(svd->vpage, vpgtob(npages));
		svd->vpage = NULL;
	}
	if ((amp = svd->amp) != NULL) {
		/*
		 * If there are no more references to this anon_map
		 * structure, then deallocate the structure after freeing
		 * up all the anon slot pointers that we can.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		ASSERT(amp->a_szc >= seg->s_szc);
		if (--amp->refcnt == 0) {
			if (svd->type == MAP_PRIVATE) {
				/*
				 * Private - we only need to anon_free
				 * the part that this segment refers to.
				 */
				if (seg->s_szc != 0) {
					anon_free_pages(amp->ahp,
					    svd->anon_index, seg->s_size,
					    seg->s_szc);
				} else {
					anon_free(amp->ahp, svd->anon_index,
					    seg->s_size);
				}
			} else {

				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				ASSERT(svd->softlockcnt == 0);
				anonmap_purge(amp);

				/*
				 * Shared - anon_free the entire
				 * anon_map's worth of stuff and
				 * release any swap reservation.
				 */
				if (amp->a_szc != 0) {
					anon_shmap_free_pages(amp, 0,
					    amp->size);
				} else {
					anon_free(amp->ahp, 0, amp->size);
				}
				if ((len = amp->swresv) != 0) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
					TRACE_3(TR_FAC_VM, TR_ANON_PROC,
					    "anon proc:%p %lu %u", seg, len, 0);
				}
			}
			svd->amp = NULL;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			anonmap_free(amp);
		} else if (svd->type == MAP_PRIVATE) {
			/*
			 * We had a private mapping which still has
			 * a held anon_map so just free up all the
			 * anon slot pointers that we were using.
			 */
			if (seg->s_szc != 0) {
				anon_free_pages(amp->ahp, svd->anon_index,
				    seg->s_size, seg->s_szc);
			} else {
				anon_free(amp->ahp, svd->anon_index,
				    seg->s_size);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Release swap reservation.
	 */
	if ((len = svd->swresv) != 0) {
		anon_unresv_zone(svd->swresv,
		    seg->s_as->a_proc->p_zone);
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
		if (SEG_IS_PARTIAL_RESV(seg))
			seg->s_as->a_resvsize -= svd->swresv;
		svd->swresv = 0;
	}
	/*
	 * Release claim on vnode, credentials, and finally free the
	 * private data.
	 */
	if (svd->vp != NULL) {
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_fini(NULL, svd->vp);
		VN_RELE(svd->vp);
		svd->vp = NULL;
	}
	crfree(svd->cred);
	svd->pageprot = 0;
	svd->pageadvice = 0;
	svd->pageswap = 0;
	svd->cred = NULL;

	/*
	 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
	 * still working with this segment without holding as lock (in case
	 * it's called by pcache async thread).
	 */
	ASSERT(svd->softlockcnt == 0);
	mutex_enter(&svd->segfree_syncmtx);
	mutex_exit(&svd->segfree_syncmtx);

	seg->s_data = NULL;
	kmem_cache_free(segvn_cache, svd);
}

/*
 * Do a F_SOFTUNLOCK call over the range requested. The range must have
 * already been F_SOFTLOCK'ed.
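 * The matching F_SOFTLOCK fault loaded the translations locked and raised
 * softlockcnt; this routine drops both the translation locks and the count.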
2566 * Caller must always match addr and len of a softunlock with a previous 2567 * softlock with exactly the same addr and len. 2568 */ 2569 static void 2570 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2571 { 2572 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2573 page_t *pp; 2574 caddr_t adr; 2575 struct vnode *vp; 2576 u_offset_t offset; 2577 ulong_t anon_index = 0; 2578 struct anon_map *amp; 2579 struct anon *ap = NULL; 2580 2581 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 2582 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2583 2584 if ((amp = svd->amp) != NULL) 2585 anon_index = svd->anon_index + seg_page(seg, addr); 2586 2587 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2588 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2589 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2590 } else { 2591 hat_unlock(seg->s_as->a_hat, addr, len); 2592 } 2593 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2594 if (amp != NULL) { 2595 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2596 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2597 != NULL) { 2598 swap_xlate(ap, &vp, &offset); 2599 } else { 2600 vp = svd->vp; 2601 offset = svd->offset + 2602 (uintptr_t)(adr - seg->s_base); 2603 } 2604 ANON_LOCK_EXIT(&->a_rwlock); 2605 } else { 2606 vp = svd->vp; 2607 offset = svd->offset + 2608 (uintptr_t)(adr - seg->s_base); 2609 } 2610 2611 /* 2612 * Use page_find() instead of page_lookup() to 2613 * find the page since we know that it is locked. 2614 */ 2615 pp = page_find(vp, offset); 2616 if (pp == NULL) { 2617 panic( 2618 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2619 (void *)adr, (void *)ap, (void *)vp, offset); 2620 /*NOTREACHED*/ 2621 } 2622 2623 if (rw == S_WRITE) { 2624 hat_setrefmod(pp); 2625 if (seg->s_as->a_vbits) 2626 hat_setstat(seg->s_as, adr, PAGESIZE, 2627 P_REF | P_MOD); 2628 } else if (rw != S_OTHER) { 2629 hat_setref(pp); 2630 if (seg->s_as->a_vbits) 2631 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2632 } 2633 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2634 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2635 page_unlock(pp); 2636 } 2637 ASSERT(svd->softlockcnt >= btop(len)); 2638 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2639 /* 2640 * All SOFTLOCKS are gone. Wakeup any waiting 2641 * unmappers so they can try again to unmap. 2642 * Check for waiters first without the mutex 2643 * held so we don't always grab the mutex on 2644 * softunlocks. 2645 */ 2646 if (AS_ISUNMAPWAIT(seg->s_as)) { 2647 mutex_enter(&seg->s_as->a_contents); 2648 if (AS_ISUNMAPWAIT(seg->s_as)) { 2649 AS_CLRUNMAPWAIT(seg->s_as); 2650 cv_broadcast(&seg->s_as->a_cv); 2651 } 2652 mutex_exit(&seg->s_as->a_contents); 2653 } 2654 } 2655 } 2656 2657 #define PAGE_HANDLED ((page_t *)-1) 2658 2659 /* 2660 * Release all the pages in the NULL terminated ppp list 2661 * which haven't already been converted to PAGE_HANDLED. 2662 */ 2663 static void 2664 segvn_pagelist_rele(page_t **ppp) 2665 { 2666 for (; *ppp != NULL; ppp++) { 2667 if (*ppp != PAGE_HANDLED) 2668 page_unlock(*ppp); 2669 } 2670 } 2671 2672 static int stealcow = 1; 2673 2674 /* 2675 * Workaround for viking chip bug. See bug id 1220902. 2676 * To fix this down in pagefault() would require importing so 2677 * much as and segvn code as to be unmaintainable. 2678 */ 2679 int enable_mbit_wa = 0; 2680 2681 /* 2682 * Handles all the dirty work of getting the right 2683 * anonymous pages and loading up the translations. 
2684 * This routine is called only from segvn_fault() 2685 * when looping over the range of addresses requested. 2686 * 2687 * The basic algorithm here is: 2688 * If this is an anon_zero case 2689 * Call anon_zero to allocate page 2690 * Load up translation 2691 * Return 2692 * endif 2693 * If this is an anon page 2694 * Use anon_getpage to get the page 2695 * else 2696 * Find page in pl[] list passed in 2697 * endif 2698 * If not a cow 2699 * Load up the translation to the page 2700 * return 2701 * endif 2702 * Call anon_private to handle cow 2703 * Load up (writable) translation to new page 2704 */ 2705 static faultcode_t 2706 segvn_faultpage( 2707 struct hat *hat, /* the hat to use for mapping */ 2708 struct seg *seg, /* seg_vn of interest */ 2709 caddr_t addr, /* address in as */ 2710 u_offset_t off, /* offset in vp */ 2711 struct vpage *vpage, /* pointer to vpage for vp, off */ 2712 page_t *pl[], /* object source page pointer */ 2713 uint_t vpprot, /* access allowed to object pages */ 2714 enum fault_type type, /* type of fault */ 2715 enum seg_rw rw, /* type of access at fault */ 2716 int brkcow) /* we may need to break cow */ 2717 { 2718 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2719 page_t *pp, **ppp; 2720 uint_t pageflags = 0; 2721 page_t *anon_pl[1 + 1]; 2722 page_t *opp = NULL; /* original page */ 2723 uint_t prot; 2724 int err; 2725 int cow; 2726 int claim; 2727 int steal = 0; 2728 ulong_t anon_index = 0; 2729 struct anon *ap, *oldap; 2730 struct anon_map *amp; 2731 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2732 int anon_lock = 0; 2733 anon_sync_obj_t cookie; 2734 2735 if (svd->flags & MAP_TEXT) { 2736 hat_flag |= HAT_LOAD_TEXT; 2737 } 2738 2739 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2740 ASSERT(seg->s_szc == 0); 2741 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2742 2743 /* 2744 * Initialize protection value for this page. 2745 * If we have per page protection values check it now. 2746 */ 2747 if (svd->pageprot) { 2748 uint_t protchk; 2749 2750 switch (rw) { 2751 case S_READ: 2752 protchk = PROT_READ; 2753 break; 2754 case S_WRITE: 2755 protchk = PROT_WRITE; 2756 break; 2757 case S_EXEC: 2758 protchk = PROT_EXEC; 2759 break; 2760 case S_OTHER: 2761 default: 2762 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2763 break; 2764 } 2765 2766 prot = VPP_PROT(vpage); 2767 if ((prot & protchk) == 0) 2768 return (FC_PROT); /* illegal access type */ 2769 } else { 2770 prot = svd->prot; 2771 } 2772 2773 if (type == F_SOFTLOCK) { 2774 atomic_inc_ulong((ulong_t *)&svd->softlockcnt); 2775 } 2776 2777 /* 2778 * Always acquire the anon array lock to prevent 2 threads from 2779 * allocating separate anon slots for the same "addr". 2780 */ 2781 2782 if ((amp = svd->amp) != NULL) { 2783 ASSERT(RW_READ_HELD(&->a_rwlock)); 2784 anon_index = svd->anon_index + seg_page(seg, addr); 2785 anon_array_enter(amp, anon_index, &cookie); 2786 anon_lock = 1; 2787 } 2788 2789 if (svd->vp == NULL && amp != NULL) { 2790 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2791 /* 2792 * Allocate a (normally) writable anonymous page of 2793 * zeroes. If no advance reservations, reserve now. 
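 * For MAP_NORESERVE segments swap is reserved lazily here, one page at
 * a time, as the zero-fill pages are first touched.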
2794 */ 2795 if (svd->flags & MAP_NORESERVE) { 2796 if (anon_resv_zone(ptob(1), 2797 seg->s_as->a_proc->p_zone)) { 2798 atomic_add_long(&svd->swresv, ptob(1)); 2799 atomic_add_long(&seg->s_as->a_resvsize, 2800 ptob(1)); 2801 } else { 2802 err = ENOMEM; 2803 goto out; 2804 } 2805 } 2806 if ((pp = anon_zero(seg, addr, &ap, 2807 svd->cred)) == NULL) { 2808 err = ENOMEM; 2809 goto out; /* out of swap space */ 2810 } 2811 /* 2812 * Re-acquire the anon_map lock and 2813 * initialize the anon array entry. 2814 */ 2815 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2816 ANON_SLEEP); 2817 2818 ASSERT(pp->p_szc == 0); 2819 2820 /* 2821 * Handle pages that have been marked for migration 2822 */ 2823 if (lgrp_optimizations()) 2824 page_migrate(seg, addr, &pp, 1); 2825 2826 if (enable_mbit_wa) { 2827 if (rw == S_WRITE) 2828 hat_setmod(pp); 2829 else if (!hat_ismod(pp)) 2830 prot &= ~PROT_WRITE; 2831 } 2832 /* 2833 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2834 * with MC_LOCKAS, MCL_FUTURE) and this is a 2835 * MAP_NORESERVE segment, we may need to 2836 * permanently lock the page as it is being faulted 2837 * for the first time. The following text applies 2838 * only to MAP_NORESERVE segments: 2839 * 2840 * As per memcntl(2), if this segment was created 2841 * after MCL_FUTURE was applied (a "future" 2842 * segment), its pages must be locked. If this 2843 * segment existed at MCL_FUTURE application (a 2844 * "past" segment), the interface is unclear. 2845 * 2846 * We decide to lock only if vpage is present: 2847 * 2848 * - "future" segments will have a vpage array (see 2849 * as_map), and so will be locked as required 2850 * 2851 * - "past" segments may not have a vpage array, 2852 * depending on whether events (such as 2853 * mprotect) have occurred. Locking if vpage 2854 * exists will preserve legacy behavior. Not 2855 * locking if vpage is absent, will not break 2856 * the interface or legacy behavior. Note that 2857 * allocating vpage here if it's absent requires 2858 * upgrading the segvn reader lock, the cost of 2859 * which does not seem worthwhile. 2860 * 2861 * Usually testing and setting VPP_ISPPLOCK and 2862 * VPP_SETPPLOCK requires holding the segvn lock as 2863 * writer, but in this case all readers are 2864 * serializing on the anon array lock. 2865 */ 2866 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2867 (svd->flags & MAP_NORESERVE) && 2868 !VPP_ISPPLOCK(vpage)) { 2869 proc_t *p = seg->s_as->a_proc; 2870 ASSERT(svd->type == MAP_PRIVATE); 2871 mutex_enter(&p->p_lock); 2872 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2873 1) == 0) { 2874 claim = VPP_PROT(vpage) & PROT_WRITE; 2875 if (page_pp_lock(pp, claim, 0)) { 2876 VPP_SETPPLOCK(vpage); 2877 } else { 2878 rctl_decr_locked_mem(p, NULL, 2879 PAGESIZE, 1); 2880 } 2881 } 2882 mutex_exit(&p->p_lock); 2883 } 2884 2885 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2886 hat_memload(hat, addr, pp, prot, hat_flag); 2887 2888 if (!(hat_flag & HAT_LOAD_LOCK)) 2889 page_unlock(pp); 2890 2891 anon_array_exit(&cookie); 2892 return (0); 2893 } 2894 } 2895 2896 /* 2897 * Obtain the page structure via anon_getpage() if it is 2898 * a private copy of an object (the result of a previous 2899 * copy-on-write). 
2900 */ 2901 if (amp != NULL) { 2902 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2903 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2904 seg, addr, rw, svd->cred); 2905 if (err) 2906 goto out; 2907 2908 if (svd->type == MAP_SHARED) { 2909 /* 2910 * If this is a shared mapping to an 2911 * anon_map, then ignore the write 2912 * permissions returned by anon_getpage(). 2913 * They apply to the private mappings 2914 * of this anon_map. 2915 */ 2916 vpprot |= PROT_WRITE; 2917 } 2918 opp = anon_pl[0]; 2919 } 2920 } 2921 2922 /* 2923 * Search the pl[] list passed in if it is from the 2924 * original object (i.e., not a private copy). 2925 */ 2926 if (opp == NULL) { 2927 /* 2928 * Find original page. We must be bringing it in 2929 * from the list in pl[]. 2930 */ 2931 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2932 if (opp == PAGE_HANDLED) 2933 continue; 2934 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2935 if (opp->p_offset == off) 2936 break; 2937 } 2938 if (opp == NULL) { 2939 panic("segvn_faultpage not found"); 2940 /*NOTREACHED*/ 2941 } 2942 *ppp = PAGE_HANDLED; 2943 2944 } 2945 2946 ASSERT(PAGE_LOCKED(opp)); 2947 2948 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2949 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2950 2951 /* 2952 * The fault is treated as a copy-on-write fault if a 2953 * write occurs on a private segment and the object 2954 * page (i.e., mapping) is write protected. We assume 2955 * that fatal protection checks have already been made. 2956 */ 2957 2958 if (brkcow) { 2959 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2960 cow = !(vpprot & PROT_WRITE); 2961 } else if (svd->tr_state == SEGVN_TR_ON) { 2962 /* 2963 * If we are doing text replication COW on first touch. 2964 */ 2965 ASSERT(amp != NULL); 2966 ASSERT(svd->vp != NULL); 2967 ASSERT(rw != S_WRITE); 2968 cow = (ap == NULL); 2969 } else { 2970 cow = 0; 2971 } 2972 2973 /* 2974 * If not a copy-on-write case load the translation 2975 * and return. 2976 */ 2977 if (cow == 0) { 2978 2979 /* 2980 * Handle pages that have been marked for migration 2981 */ 2982 if (lgrp_optimizations()) 2983 page_migrate(seg, addr, &opp, 1); 2984 2985 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2986 if (rw == S_WRITE) 2987 hat_setmod(opp); 2988 else if (rw != S_OTHER && !hat_ismod(opp)) 2989 prot &= ~PROT_WRITE; 2990 } 2991 2992 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2993 (!svd->pageprot && svd->prot == (prot & vpprot))); 2994 ASSERT(amp == NULL || 2995 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2996 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2997 svd->rcookie); 2998 2999 if (!(hat_flag & HAT_LOAD_LOCK)) 3000 page_unlock(opp); 3001 3002 if (anon_lock) { 3003 anon_array_exit(&cookie); 3004 } 3005 return (0); 3006 } 3007 3008 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3009 3010 hat_setref(opp); 3011 3012 ASSERT(amp != NULL && anon_lock); 3013 3014 /* 3015 * Steal the page only if it isn't a private page 3016 * since stealing a private page is not worth the effort. 3017 */ 3018 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 3019 steal = 1; 3020 3021 /* 3022 * Steal the original page if the following conditions are true: 3023 * 3024 * We are low on memory, the page is not private, page is not large, 3025 * not shared, not modified, not `locked' or if we have it `locked' 3026 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 3027 * that the page is not shared) and if it doesn't have any 3028 * translations. 
page_struct_lock isn't needed to look at p_cowcnt 3029 * and p_lckcnt because we first get exclusive lock on page. 3030 */ 3031 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 3032 3033 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 3034 page_tryupgrade(opp) && !hat_ismod(opp) && 3035 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 3036 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 3037 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 3038 /* 3039 * Check if this page has other translations 3040 * after unloading our translation. 3041 */ 3042 if (hat_page_is_mapped(opp)) { 3043 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3044 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 3045 HAT_UNLOAD); 3046 } 3047 3048 /* 3049 * hat_unload() might sync back someone else's recent 3050 * modification, so check again. 3051 */ 3052 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 3053 pageflags |= STEAL_PAGE; 3054 } 3055 3056 /* 3057 * If we have a vpage pointer, see if it indicates that we have 3058 * ``locked'' the page we map -- if so, tell anon_private to 3059 * transfer the locking resource to the new page. 3060 * 3061 * See Statement at the beginning of segvn_lockop regarding 3062 * the way lockcnts/cowcnts are handled during COW. 3063 * 3064 */ 3065 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 3066 pageflags |= LOCK_PAGE; 3067 3068 /* 3069 * Allocate a private page and perform the copy. 3070 * For MAP_NORESERVE reserve swap space now, unless this 3071 * is a cow fault on an existing anon page in which case 3072 * MAP_NORESERVE will have made advance reservations. 3073 */ 3074 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3075 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3076 atomic_add_long(&svd->swresv, ptob(1)); 3077 atomic_add_long(&seg->s_as->a_resvsize, ptob(1)); 3078 } else { 3079 page_unlock(opp); 3080 err = ENOMEM; 3081 goto out; 3082 } 3083 } 3084 oldap = ap; 3085 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3086 if (pp == NULL) { 3087 err = ENOMEM; /* out of swap space */ 3088 goto out; 3089 } 3090 3091 /* 3092 * If we copied away from an anonymous page, then 3093 * we are one step closer to freeing up an anon slot. 3094 * 3095 * NOTE: The original anon slot must be released while 3096 * holding the "anon_map" lock. This is necessary to prevent 3097 * other threads from obtaining a pointer to the anon slot 3098 * which may be freed if its "refcnt" is 1. 3099 */ 3100 if (oldap != NULL) 3101 anon_decref(oldap); 3102 3103 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3104 3105 /* 3106 * Handle pages that have been marked for migration 3107 */ 3108 if (lgrp_optimizations()) 3109 page_migrate(seg, addr, &pp, 1); 3110 3111 ASSERT(pp->p_szc == 0); 3112 3113 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3114 if (enable_mbit_wa) { 3115 if (rw == S_WRITE) 3116 hat_setmod(pp); 3117 else if (!hat_ismod(pp)) 3118 prot &= ~PROT_WRITE; 3119 } 3120 3121 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3122 hat_memload(hat, addr, pp, prot, hat_flag); 3123 3124 if (!(hat_flag & HAT_LOAD_LOCK)) 3125 page_unlock(pp); 3126 3127 ASSERT(anon_lock); 3128 anon_array_exit(&cookie); 3129 return (0); 3130 out: 3131 if (anon_lock) 3132 anon_array_exit(&cookie); 3133 3134 if (type == F_SOFTLOCK) { 3135 atomic_dec_ulong((ulong_t *)&svd->softlockcnt); 3136 } 3137 return (FC_MAKE_ERR(err)); 3138 } 3139 3140 /* 3141 * relocate a bunch of smaller targ pages into one large repl page. 
all targ 3142 * pages must be complete pages smaller than replacement pages. 3143 * it's assumed that no page's szc can change since they are all PAGESIZE or 3144 * complete large pages locked SHARED. 3145 */ 3146 static void 3147 segvn_relocate_pages(page_t **targ, page_t *replacement) 3148 { 3149 page_t *pp; 3150 pgcnt_t repl_npgs, curnpgs; 3151 pgcnt_t i; 3152 uint_t repl_szc = replacement->p_szc; 3153 page_t *first_repl = replacement; 3154 page_t *repl; 3155 spgcnt_t npgs; 3156 3157 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3158 3159 ASSERT(repl_szc != 0); 3160 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3161 3162 i = 0; 3163 while (repl_npgs) { 3164 spgcnt_t nreloc; 3165 int err; 3166 ASSERT(replacement != NULL); 3167 pp = targ[i]; 3168 ASSERT(pp->p_szc < repl_szc); 3169 ASSERT(PAGE_EXCL(pp)); 3170 ASSERT(!PP_ISFREE(pp)); 3171 curnpgs = page_get_pagecnt(pp->p_szc); 3172 if (curnpgs == 1) { 3173 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3174 repl = replacement; 3175 page_sub(&replacement, repl); 3176 ASSERT(PAGE_EXCL(repl)); 3177 ASSERT(!PP_ISFREE(repl)); 3178 ASSERT(repl->p_szc == repl_szc); 3179 } else { 3180 page_t *repl_savepp; 3181 int j; 3182 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3183 repl_savepp = replacement; 3184 for (j = 0; j < curnpgs; j++) { 3185 repl = replacement; 3186 page_sub(&replacement, repl); 3187 ASSERT(PAGE_EXCL(repl)); 3188 ASSERT(!PP_ISFREE(repl)); 3189 ASSERT(repl->p_szc == repl_szc); 3190 ASSERT(page_pptonum(targ[i + j]) == 3191 page_pptonum(targ[i]) + j); 3192 } 3193 repl = repl_savepp; 3194 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3195 } 3196 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3197 if (err || nreloc != curnpgs) { 3198 panic("segvn_relocate_pages: " 3199 "page_relocate failed err=%d curnpgs=%ld " 3200 "nreloc=%ld", err, curnpgs, nreloc); 3201 } 3202 ASSERT(curnpgs <= repl_npgs); 3203 repl_npgs -= curnpgs; 3204 i += curnpgs; 3205 } 3206 ASSERT(replacement == NULL); 3207 3208 repl = first_repl; 3209 repl_npgs = npgs; 3210 for (i = 0; i < repl_npgs; i++) { 3211 ASSERT(PAGE_EXCL(repl)); 3212 ASSERT(!PP_ISFREE(repl)); 3213 targ[i] = repl; 3214 page_downgrade(targ[i]); 3215 repl++; 3216 } 3217 } 3218 3219 /* 3220 * Check if all pages in ppa array are complete smaller than szc pages and 3221 * their roots will still be aligned relative to their current size if the 3222 * entire ppa array is relocated into one szc page. If these conditions are 3223 * not met return 0. 3224 * 3225 * If all pages are properly aligned attempt to upgrade their locks 3226 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3227 * upgrdfail was set to 0 by caller. 3228 * 3229 * Return 1 if all pages are aligned and locked exclusively. 3230 * 3231 * If all pages in ppa array happen to be physically contiguous to make one 3232 * szc page and all exclusive locks are successfully obtained promote the page 3233 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
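 * In the physically contiguous case the pages are hat_pageunload()ed
 * first, their p_szc is raised to szc without any relocation, and the
 * exclusive locks are downgraded back to shared before returning.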
3234 */ 3235 static int 3236 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3237 { 3238 page_t *pp; 3239 pfn_t pfn; 3240 pgcnt_t totnpgs = page_get_pagecnt(szc); 3241 pfn_t first_pfn; 3242 int contig = 1; 3243 pgcnt_t i; 3244 pgcnt_t j; 3245 uint_t curszc; 3246 pgcnt_t curnpgs; 3247 int root = 0; 3248 3249 ASSERT(szc > 0); 3250 3251 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3252 3253 for (i = 0; i < totnpgs; i++) { 3254 pp = ppa[i]; 3255 ASSERT(PAGE_SHARED(pp)); 3256 ASSERT(!PP_ISFREE(pp)); 3257 pfn = page_pptonum(pp); 3258 if (i == 0) { 3259 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3260 contig = 0; 3261 } else { 3262 first_pfn = pfn; 3263 } 3264 } else if (contig && pfn != first_pfn + i) { 3265 contig = 0; 3266 } 3267 if (pp->p_szc == 0) { 3268 if (root) { 3269 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3270 return (0); 3271 } 3272 } else if (!root) { 3273 if ((curszc = pp->p_szc) >= szc) { 3274 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3275 return (0); 3276 } 3277 if (curszc == 0) { 3278 /* 3279 * p_szc changed means we don't have all pages 3280 * locked. return failure. 3281 */ 3282 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3283 return (0); 3284 } 3285 curnpgs = page_get_pagecnt(curszc); 3286 if (!IS_P2ALIGNED(pfn, curnpgs) || 3287 !IS_P2ALIGNED(i, curnpgs)) { 3288 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3289 return (0); 3290 } 3291 root = 1; 3292 } else { 3293 ASSERT(i > 0); 3294 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3295 if (pp->p_szc != curszc) { 3296 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3297 return (0); 3298 } 3299 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3300 panic("segvn_full_szcpages: " 3301 "large page not physically contiguous"); 3302 } 3303 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3304 root = 0; 3305 } 3306 } 3307 } 3308 3309 for (i = 0; i < totnpgs; i++) { 3310 ASSERT(ppa[i]->p_szc < szc); 3311 if (!page_tryupgrade(ppa[i])) { 3312 for (j = 0; j < i; j++) { 3313 page_downgrade(ppa[j]); 3314 } 3315 *pszc = ppa[i]->p_szc; 3316 *upgrdfail = 1; 3317 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3318 return (0); 3319 } 3320 } 3321 3322 /* 3323 * When a page is put a free cachelist its szc is set to 0. if file 3324 * system reclaimed pages from cachelist targ pages will be physically 3325 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3326 * pages without any relocations. 3327 * To avoid any hat issues with previous small mappings 3328 * hat_pageunload() the target pages first. 3329 */ 3330 if (contig) { 3331 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3332 for (i = 0; i < totnpgs; i++) { 3333 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3334 } 3335 for (i = 0; i < totnpgs; i++) { 3336 ppa[i]->p_szc = szc; 3337 } 3338 for (i = 0; i < totnpgs; i++) { 3339 ASSERT(PAGE_EXCL(ppa[i])); 3340 page_downgrade(ppa[i]); 3341 } 3342 if (pszc != NULL) { 3343 *pszc = szc; 3344 } 3345 } 3346 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3347 return (1); 3348 } 3349 3350 /* 3351 * Create physically contiguous pages for [vp, off] - [vp, off + 3352 * page_size(szc)) range and for private segment return them in ppa array. 3353 * Pages are created either via IO or relocations. 3354 * 3355 * Return 1 on success and 0 on failure. 3356 * 3357 * If physically contiguous pages already exist for this range return 1 without 3358 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3359 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
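 * On failure the preallocated replacement pages in *ppplist are either
 * left for the caller to fall back on or handed back to the page
 * allocator, depending on the kind of error.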
3360 */ 3361 3362 static int 3363 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3364 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3365 int *downsize) 3366 { 3367 page_t *pplist = *ppplist; 3368 size_t pgsz = page_get_pagesize(szc); 3369 pgcnt_t pages = btop(pgsz); 3370 ulong_t start_off = off; 3371 u_offset_t eoff = off + pgsz; 3372 spgcnt_t nreloc; 3373 u_offset_t io_off = off; 3374 size_t io_len; 3375 page_t *io_pplist = NULL; 3376 page_t *done_pplist = NULL; 3377 pgcnt_t pgidx = 0; 3378 page_t *pp; 3379 page_t *newpp; 3380 page_t *targpp; 3381 int io_err = 0; 3382 int i; 3383 pfn_t pfn; 3384 ulong_t ppages; 3385 page_t *targ_pplist = NULL; 3386 page_t *repl_pplist = NULL; 3387 page_t *tmp_pplist; 3388 int nios = 0; 3389 uint_t pszc; 3390 struct vattr va; 3391 3392 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3393 3394 ASSERT(szc != 0); 3395 ASSERT(pplist->p_szc == szc); 3396 3397 /* 3398 * downsize will be set to 1 only if we fail to lock pages. this will 3399 * allow subsequent faults to try to relocate the page again. If we 3400 * fail due to misalignment don't downsize and let the caller map the 3401 * whole region with small mappings to avoid more faults into the area 3402 * where we can't get large pages anyway. 3403 */ 3404 *downsize = 0; 3405 3406 while (off < eoff) { 3407 newpp = pplist; 3408 ASSERT(newpp != NULL); 3409 ASSERT(PAGE_EXCL(newpp)); 3410 ASSERT(!PP_ISFREE(newpp)); 3411 /* 3412 * we pass NULL for nrelocp to page_lookup_create() 3413 * so that it doesn't relocate. We relocate here 3414 * later only after we make sure we can lock all 3415 * pages in the range we handle and they are all 3416 * aligned. 3417 */ 3418 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3419 ASSERT(pp != NULL); 3420 ASSERT(!PP_ISFREE(pp)); 3421 ASSERT(pp->p_vnode == vp); 3422 ASSERT(pp->p_offset == off); 3423 if (pp == newpp) { 3424 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3425 page_sub(&pplist, pp); 3426 ASSERT(PAGE_EXCL(pp)); 3427 ASSERT(page_iolock_assert(pp)); 3428 page_list_concat(&io_pplist, &pp); 3429 off += PAGESIZE; 3430 continue; 3431 } 3432 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3433 pfn = page_pptonum(pp); 3434 pszc = pp->p_szc; 3435 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3436 IS_P2ALIGNED(pfn, pages)) { 3437 ASSERT(repl_pplist == NULL); 3438 ASSERT(done_pplist == NULL); 3439 ASSERT(pplist == *ppplist); 3440 page_unlock(pp); 3441 page_free_replacement_page(pplist); 3442 page_create_putback(pages); 3443 *ppplist = NULL; 3444 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3445 return (1); 3446 } 3447 if (pszc >= szc) { 3448 page_unlock(pp); 3449 segvn_faultvnmpss_align_err1++; 3450 goto out; 3451 } 3452 ppages = page_get_pagecnt(pszc); 3453 if (!IS_P2ALIGNED(pfn, ppages)) { 3454 ASSERT(pszc > 0); 3455 /* 3456 * sizing down to pszc won't help. 3457 */ 3458 page_unlock(pp); 3459 segvn_faultvnmpss_align_err2++; 3460 goto out; 3461 } 3462 pfn = page_pptonum(newpp); 3463 if (!IS_P2ALIGNED(pfn, ppages)) { 3464 ASSERT(pszc > 0); 3465 /* 3466 * sizing down to pszc won't help. 
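 * (the replacement page itself is misaligned for pszc, so a smaller
 * page size would fail the same alignment check).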
 */
			page_unlock(pp);
			segvn_faultvnmpss_align_err3++;
			goto out;
		}
		if (!PAGE_EXCL(pp)) {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
			page_unlock(pp);
			*downsize = 1;
			*ret_pszc = pp->p_szc;
			goto out;
		}
		targpp = pp;
		if (io_pplist != NULL) {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
			io_len = off - io_off;
			/*
			 * Some file systems like NFS don't check EOF
			 * conditions in VOP_PAGEIO(). Check it here
			 * now that pages are locked SE_EXCL. Any file
			 * truncation will wait until the pages are
			 * unlocked so no need to worry that the file will
			 * be truncated after we check its size here.
			 * XXX fix NFS to remove this check.
			 */
			va.va_mask = AT_SIZE;
			if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
				VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
				page_unlock(targpp);
				goto out;
			}
			if (btopr(va.va_size) < btopr(io_off + io_len)) {
				VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
				*downsize = 1;
				*ret_pszc = 0;
				page_unlock(targpp);
				goto out;
			}
			io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
			    B_READ, svd->cred, NULL);
			if (io_err) {
				VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
				page_unlock(targpp);
				if (io_err == EDEADLK) {
					segvn_vmpss_pageio_deadlk_err++;
				}
				goto out;
			}
			nios++;
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
			while (io_pplist != NULL) {
				pp = io_pplist;
				page_sub(&io_pplist, pp);
				ASSERT(page_iolock_assert(pp));
				page_io_unlock(pp);
				pgidx = (pp->p_offset - start_off) >>
				    PAGESHIFT;
				ASSERT(pgidx < pages);
				ppa[pgidx] = pp;
				page_list_concat(&done_pplist, &pp);
			}
		}
		pp = targpp;
		ASSERT(PAGE_EXCL(pp));
		ASSERT(pp->p_szc <= pszc);
		if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
			page_unlock(pp);
			*downsize = 1;
			*ret_pszc = pp->p_szc;
			goto out;
		}
		VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
		/*
		 * The page's szc could have changed before the entire group
		 * was locked. Reread the page szc.
3543 */ 3544 pszc = pp->p_szc; 3545 ppages = page_get_pagecnt(pszc); 3546 3547 /* link just the roots */ 3548 page_list_concat(&targ_pplist, &pp); 3549 page_sub(&pplist, newpp); 3550 page_list_concat(&repl_pplist, &newpp); 3551 off += PAGESIZE; 3552 while (--ppages != 0) { 3553 newpp = pplist; 3554 page_sub(&pplist, newpp); 3555 off += PAGESIZE; 3556 } 3557 io_off = off; 3558 } 3559 if (io_pplist != NULL) { 3560 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3561 io_len = eoff - io_off; 3562 va.va_mask = AT_SIZE; 3563 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3564 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3565 goto out; 3566 } 3567 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3568 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3569 *downsize = 1; 3570 *ret_pszc = 0; 3571 goto out; 3572 } 3573 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3574 B_READ, svd->cred, NULL); 3575 if (io_err) { 3576 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3577 if (io_err == EDEADLK) { 3578 segvn_vmpss_pageio_deadlk_err++; 3579 } 3580 goto out; 3581 } 3582 nios++; 3583 while (io_pplist != NULL) { 3584 pp = io_pplist; 3585 page_sub(&io_pplist, pp); 3586 ASSERT(page_iolock_assert(pp)); 3587 page_io_unlock(pp); 3588 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3589 ASSERT(pgidx < pages); 3590 ppa[pgidx] = pp; 3591 } 3592 } 3593 /* 3594 * we're now bound to succeed or panic. 3595 * remove pages from done_pplist. it's not needed anymore. 3596 */ 3597 while (done_pplist != NULL) { 3598 pp = done_pplist; 3599 page_sub(&done_pplist, pp); 3600 } 3601 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3602 ASSERT(pplist == NULL); 3603 *ppplist = NULL; 3604 while (targ_pplist != NULL) { 3605 int ret; 3606 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3607 ASSERT(repl_pplist); 3608 pp = targ_pplist; 3609 page_sub(&targ_pplist, pp); 3610 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3611 newpp = repl_pplist; 3612 page_sub(&repl_pplist, newpp); 3613 #ifdef DEBUG 3614 pfn = page_pptonum(pp); 3615 pszc = pp->p_szc; 3616 ppages = page_get_pagecnt(pszc); 3617 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3618 pfn = page_pptonum(newpp); 3619 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3620 ASSERT(P2PHASE(pfn, pages) == pgidx); 3621 #endif 3622 nreloc = 0; 3623 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3624 if (ret != 0 || nreloc == 0) { 3625 panic("segvn_fill_vp_pages: " 3626 "page_relocate failed"); 3627 } 3628 pp = newpp; 3629 while (nreloc-- != 0) { 3630 ASSERT(PAGE_EXCL(pp)); 3631 ASSERT(pp->p_vnode == vp); 3632 ASSERT(pgidx == 3633 ((pp->p_offset - start_off) >> PAGESHIFT)); 3634 ppa[pgidx++] = pp; 3635 pp++; 3636 } 3637 } 3638 3639 if (svd->type == MAP_PRIVATE) { 3640 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3641 for (i = 0; i < pages; i++) { 3642 ASSERT(ppa[i] != NULL); 3643 ASSERT(PAGE_EXCL(ppa[i])); 3644 ASSERT(ppa[i]->p_vnode == vp); 3645 ASSERT(ppa[i]->p_offset == 3646 start_off + (i << PAGESHIFT)); 3647 page_downgrade(ppa[i]); 3648 } 3649 ppa[pages] = NULL; 3650 } else { 3651 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3652 /* 3653 * the caller will still call VOP_GETPAGE() for shared segments 3654 * to check FS write permissions. For private segments we map 3655 * file read only anyway. so no VOP_GETPAGE is needed. 
3656 */ 3657 for (i = 0; i < pages; i++) { 3658 ASSERT(ppa[i] != NULL); 3659 ASSERT(PAGE_EXCL(ppa[i])); 3660 ASSERT(ppa[i]->p_vnode == vp); 3661 ASSERT(ppa[i]->p_offset == 3662 start_off + (i << PAGESHIFT)); 3663 page_unlock(ppa[i]); 3664 } 3665 ppa[0] = NULL; 3666 } 3667 3668 return (1); 3669 out: 3670 /* 3671 * Do the cleanup. Unlock target pages we didn't relocate. They are 3672 * linked on targ_pplist by root pages. reassemble unused replacement 3673 * and io pages back to pplist. 3674 */ 3675 if (io_pplist != NULL) { 3676 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3677 pp = io_pplist; 3678 do { 3679 ASSERT(pp->p_vnode == vp); 3680 ASSERT(pp->p_offset == io_off); 3681 ASSERT(page_iolock_assert(pp)); 3682 page_io_unlock(pp); 3683 page_hashout(pp, NULL); 3684 io_off += PAGESIZE; 3685 } while ((pp = pp->p_next) != io_pplist); 3686 page_list_concat(&io_pplist, &pplist); 3687 pplist = io_pplist; 3688 } 3689 tmp_pplist = NULL; 3690 while (targ_pplist != NULL) { 3691 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3692 pp = targ_pplist; 3693 ASSERT(PAGE_EXCL(pp)); 3694 page_sub(&targ_pplist, pp); 3695 3696 pszc = pp->p_szc; 3697 ppages = page_get_pagecnt(pszc); 3698 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3699 3700 if (pszc != 0) { 3701 group_page_unlock(pp); 3702 } 3703 page_unlock(pp); 3704 3705 pp = repl_pplist; 3706 ASSERT(pp != NULL); 3707 ASSERT(PAGE_EXCL(pp)); 3708 ASSERT(pp->p_szc == szc); 3709 page_sub(&repl_pplist, pp); 3710 3711 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3712 3713 /* relink replacement page */ 3714 page_list_concat(&tmp_pplist, &pp); 3715 while (--ppages != 0) { 3716 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3717 pp++; 3718 ASSERT(PAGE_EXCL(pp)); 3719 ASSERT(pp->p_szc == szc); 3720 page_list_concat(&tmp_pplist, &pp); 3721 } 3722 } 3723 if (tmp_pplist != NULL) { 3724 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3725 page_list_concat(&tmp_pplist, &pplist); 3726 pplist = tmp_pplist; 3727 } 3728 /* 3729 * at this point all pages are either on done_pplist or 3730 * pplist. They can't be all on done_pplist otherwise 3731 * we'd've been done. 3732 */ 3733 ASSERT(pplist != NULL); 3734 if (nios != 0) { 3735 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3736 pp = pplist; 3737 do { 3738 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3739 ASSERT(pp->p_szc == szc); 3740 ASSERT(PAGE_EXCL(pp)); 3741 ASSERT(pp->p_vnode != vp); 3742 pp->p_szc = 0; 3743 } while ((pp = pp->p_next) != pplist); 3744 3745 pp = done_pplist; 3746 do { 3747 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3748 ASSERT(pp->p_szc == szc); 3749 ASSERT(PAGE_EXCL(pp)); 3750 ASSERT(pp->p_vnode == vp); 3751 pp->p_szc = 0; 3752 } while ((pp = pp->p_next) != done_pplist); 3753 3754 while (pplist != NULL) { 3755 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3756 pp = pplist; 3757 page_sub(&pplist, pp); 3758 page_free(pp, 0); 3759 } 3760 3761 while (done_pplist != NULL) { 3762 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3763 pp = done_pplist; 3764 page_sub(&done_pplist, pp); 3765 page_unlock(pp); 3766 } 3767 *ppplist = NULL; 3768 return (0); 3769 } 3770 ASSERT(pplist == *ppplist); 3771 if (io_err) { 3772 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3773 /* 3774 * don't downsize on io error. 3775 * see if vop_getpage succeeds. 3776 * pplist may still be used in this case 3777 * for relocations. 
3778 */ 3779 return (0); 3780 } 3781 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3782 page_free_replacement_page(pplist); 3783 page_create_putback(pages); 3784 *ppplist = NULL; 3785 return (0); 3786 } 3787 3788 int segvn_anypgsz = 0; 3789 3790 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3791 if ((type) == F_SOFTLOCK) { \ 3792 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3793 -(pages)); \ 3794 } 3795 3796 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3797 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3798 if ((rw) == S_WRITE) { \ 3799 for (i = 0; i < (pages); i++) { \ 3800 ASSERT((ppa)[i]->p_vnode == \ 3801 (ppa)[0]->p_vnode); \ 3802 hat_setmod((ppa)[i]); \ 3803 } \ 3804 } else if ((rw) != S_OTHER && \ 3805 ((prot) & (vpprot) & PROT_WRITE)) { \ 3806 for (i = 0; i < (pages); i++) { \ 3807 ASSERT((ppa)[i]->p_vnode == \ 3808 (ppa)[0]->p_vnode); \ 3809 if (!hat_ismod((ppa)[i])) { \ 3810 prot &= ~PROT_WRITE; \ 3811 break; \ 3812 } \ 3813 } \ 3814 } \ 3815 } 3816 3817 #ifdef VM_STATS 3818 3819 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3820 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3821 3822 #else /* VM_STATS */ 3823 3824 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3825 3826 #endif 3827 3828 static faultcode_t 3829 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3830 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3831 caddr_t eaddr, int brkcow) 3832 { 3833 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3834 struct anon_map *amp = svd->amp; 3835 uchar_t segtype = svd->type; 3836 uint_t szc = seg->s_szc; 3837 size_t pgsz = page_get_pagesize(szc); 3838 size_t maxpgsz = pgsz; 3839 pgcnt_t pages = btop(pgsz); 3840 pgcnt_t maxpages = pages; 3841 size_t ppasize = (pages + 1) * sizeof (page_t *); 3842 caddr_t a = lpgaddr; 3843 caddr_t maxlpgeaddr = lpgeaddr; 3844 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3845 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3846 struct vpage *vpage = (svd->vpage != NULL) ? 3847 &svd->vpage[seg_page(seg, a)] : NULL; 3848 vnode_t *vp = svd->vp; 3849 page_t **ppa; 3850 uint_t pszc; 3851 size_t ppgsz; 3852 pgcnt_t ppages; 3853 faultcode_t err = 0; 3854 int ierr; 3855 int vop_size_err = 0; 3856 uint_t protchk, prot, vpprot; 3857 ulong_t i; 3858 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3859 anon_sync_obj_t an_cookie; 3860 enum seg_rw arw; 3861 int alloc_failed = 0; 3862 int adjszc_chk; 3863 struct vattr va; 3864 page_t *pplist; 3865 pfn_t pfn; 3866 int physcontig; 3867 int upgrdfail; 3868 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3869 int tron = (svd->tr_state == SEGVN_TR_ON); 3870 3871 ASSERT(szc != 0); 3872 ASSERT(vp != NULL); 3873 ASSERT(brkcow == 0 || amp != NULL); 3874 ASSERT(tron == 0 || amp != NULL); 3875 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3876 ASSERT(!(svd->flags & MAP_NORESERVE)); 3877 ASSERT(type != F_SOFTUNLOCK); 3878 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3879 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3880 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3881 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3882 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3883 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3884 3885 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3886 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3887 3888 if (svd->flags & MAP_TEXT) { 3889 hat_flag |= HAT_LOAD_TEXT; 3890 } 3891 3892 if (svd->pageprot) { 3893 prot = PROT_NONE; 3894 switch (rw) { 3895 case S_READ: 3896 protchk = PROT_READ; 3897 break; 3898 case S_WRITE: 3899 protchk = PROT_WRITE; 3900 break; 3901 case S_EXEC: 3902 protchk = PROT_EXEC; 3903 break; 3904 case S_OTHER: 3905 default: 3906 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3907 break; 3908 } 3909 } else { 3910 protchk = PROT_NONE; 3911 prot = svd->prot; 3912 /* caller has already done segment level protection check. */ 3913 } 3914 3915 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3916 SEGVN_VMSTAT_FLTVNPAGES(2); 3917 arw = S_READ; 3918 } else { 3919 arw = rw; 3920 } 3921 3922 ppa = kmem_alloc(ppasize, KM_SLEEP); 3923 3924 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3925 3926 ierr = 0; 3927 for (;;) { 3928 adjszc_chk = 0; 3929 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3930 if (adjszc_chk) { 3931 while (szc < seg->s_szc) { 3932 uintptr_t e; 3933 uint_t tszc; 3934 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3935 seg->s_szc; 3936 ppgsz = page_get_pagesize(tszc); 3937 if (!IS_P2ALIGNED(a, ppgsz) || 3938 ((alloc_failed >> tszc) & 0x1)) { 3939 break; 3940 } 3941 SEGVN_VMSTAT_FLTVNPAGES(4); 3942 szc = tszc; 3943 pgsz = ppgsz; 3944 pages = btop(pgsz); 3945 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3946 lpgeaddr = (caddr_t)e; 3947 } 3948 } 3949 3950 again: 3951 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3952 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3953 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 3954 anon_array_enter(amp, aindx, &an_cookie); 3955 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3956 SEGVN_VMSTAT_FLTVNPAGES(5); 3957 ASSERT(anon_pages(amp->ahp, aindx, 3958 maxpages) == maxpages); 3959 anon_array_exit(&an_cookie); 3960 ANON_LOCK_EXIT(&->a_rwlock); 3961 err = segvn_fault_anonpages(hat, seg, 3962 a, a + maxpgsz, type, rw, 3963 MAX(a, addr), 3964 MIN(a + maxpgsz, eaddr), brkcow); 3965 if (err != 0) { 3966 SEGVN_VMSTAT_FLTVNPAGES(6); 3967 goto out; 3968 } 3969 if (szc < seg->s_szc) { 3970 szc = seg->s_szc; 3971 pgsz = maxpgsz; 3972 pages = maxpages; 3973 lpgeaddr = maxlpgeaddr; 3974 } 3975 goto next; 3976 } else { 3977 ASSERT(anon_pages(amp->ahp, aindx, 3978 maxpages) == 0); 3979 SEGVN_VMSTAT_FLTVNPAGES(7); 3980 anon_array_exit(&an_cookie); 3981 ANON_LOCK_EXIT(&->a_rwlock); 3982 } 3983 } 3984 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3985 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3986 3987 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3988 ASSERT(vpage != NULL); 3989 prot = VPP_PROT(vpage); 3990 ASSERT(sameprot(seg, a, maxpgsz)); 3991 if ((prot & protchk) == 0) { 3992 SEGVN_VMSTAT_FLTVNPAGES(8); 3993 err = FC_PROT; 3994 goto out; 3995 } 3996 } 3997 if (type == F_SOFTLOCK) { 3998 atomic_add_long((ulong_t *)&svd->softlockcnt, 3999 pages); 4000 } 4001 4002 pplist = NULL; 4003 physcontig = 0; 4004 ppa[0] = NULL; 4005 if (!brkcow && !tron && szc && 4006 !page_exists_physcontig(vp, off, szc, 4007 segtype == MAP_PRIVATE ? 
ppa : NULL)) { 4008 SEGVN_VMSTAT_FLTVNPAGES(9); 4009 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 4010 szc, 0, 0) && type != F_SOFTLOCK) { 4011 SEGVN_VMSTAT_FLTVNPAGES(10); 4012 pszc = 0; 4013 ierr = -1; 4014 alloc_failed |= (1 << szc); 4015 break; 4016 } 4017 if (pplist != NULL && 4018 vp->v_mpssdata == SEGVN_PAGEIO) { 4019 int downsize; 4020 SEGVN_VMSTAT_FLTVNPAGES(11); 4021 physcontig = segvn_fill_vp_pages(svd, 4022 vp, off, szc, ppa, &pplist, 4023 &pszc, &downsize); 4024 ASSERT(!physcontig || pplist == NULL); 4025 if (!physcontig && downsize && 4026 type != F_SOFTLOCK) { 4027 ASSERT(pplist == NULL); 4028 SEGVN_VMSTAT_FLTVNPAGES(12); 4029 ierr = -1; 4030 break; 4031 } 4032 ASSERT(!physcontig || 4033 segtype == MAP_PRIVATE || 4034 ppa[0] == NULL); 4035 if (physcontig && ppa[0] == NULL) { 4036 physcontig = 0; 4037 } 4038 } 4039 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 4040 SEGVN_VMSTAT_FLTVNPAGES(13); 4041 ASSERT(segtype == MAP_PRIVATE); 4042 physcontig = 1; 4043 } 4044 4045 if (!physcontig) { 4046 SEGVN_VMSTAT_FLTVNPAGES(14); 4047 ppa[0] = NULL; 4048 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 4049 &vpprot, ppa, pgsz, seg, a, arw, 4050 svd->cred, NULL); 4051 #ifdef DEBUG 4052 if (ierr == 0) { 4053 for (i = 0; i < pages; i++) { 4054 ASSERT(PAGE_LOCKED(ppa[i])); 4055 ASSERT(!PP_ISFREE(ppa[i])); 4056 ASSERT(ppa[i]->p_vnode == vp); 4057 ASSERT(ppa[i]->p_offset == 4058 off + (i << PAGESHIFT)); 4059 } 4060 } 4061 #endif /* DEBUG */ 4062 if (segtype == MAP_PRIVATE) { 4063 SEGVN_VMSTAT_FLTVNPAGES(15); 4064 vpprot &= ~PROT_WRITE; 4065 } 4066 } else { 4067 ASSERT(segtype == MAP_PRIVATE); 4068 SEGVN_VMSTAT_FLTVNPAGES(16); 4069 vpprot = PROT_ALL & ~PROT_WRITE; 4070 ierr = 0; 4071 } 4072 4073 if (ierr != 0) { 4074 SEGVN_VMSTAT_FLTVNPAGES(17); 4075 if (pplist != NULL) { 4076 SEGVN_VMSTAT_FLTVNPAGES(18); 4077 page_free_replacement_page(pplist); 4078 page_create_putback(pages); 4079 } 4080 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4081 if (a + pgsz <= eaddr) { 4082 SEGVN_VMSTAT_FLTVNPAGES(19); 4083 err = FC_MAKE_ERR(ierr); 4084 goto out; 4085 } 4086 va.va_mask = AT_SIZE; 4087 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4088 SEGVN_VMSTAT_FLTVNPAGES(20); 4089 err = FC_MAKE_ERR(EIO); 4090 goto out; 4091 } 4092 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4093 SEGVN_VMSTAT_FLTVNPAGES(21); 4094 err = FC_MAKE_ERR(ierr); 4095 goto out; 4096 } 4097 if (btopr(va.va_size) < 4098 btopr(off + (eaddr - a))) { 4099 SEGVN_VMSTAT_FLTVNPAGES(22); 4100 err = FC_MAKE_ERR(ierr); 4101 goto out; 4102 } 4103 if (brkcow || tron || type == F_SOFTLOCK) { 4104 /* can't reduce map area */ 4105 SEGVN_VMSTAT_FLTVNPAGES(23); 4106 vop_size_err = 1; 4107 goto out; 4108 } 4109 SEGVN_VMSTAT_FLTVNPAGES(24); 4110 ASSERT(szc != 0); 4111 pszc = 0; 4112 ierr = -1; 4113 break; 4114 } 4115 4116 if (amp != NULL) { 4117 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4118 anon_array_enter(amp, aindx, &an_cookie); 4119 } 4120 if (amp != NULL && 4121 anon_get_ptr(amp->ahp, aindx) != NULL) { 4122 ulong_t taindx = P2ALIGN(aindx, maxpages); 4123 4124 SEGVN_VMSTAT_FLTVNPAGES(25); 4125 ASSERT(anon_pages(amp->ahp, taindx, 4126 maxpages) == maxpages); 4127 for (i = 0; i < pages; i++) { 4128 page_unlock(ppa[i]); 4129 } 4130 anon_array_exit(&an_cookie); 4131 ANON_LOCK_EXIT(&->a_rwlock); 4132 if (pplist != NULL) { 4133 page_free_replacement_page(pplist); 4134 page_create_putback(pages); 4135 } 4136 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4137 if (szc < seg->s_szc) { 4138 SEGVN_VMSTAT_FLTVNPAGES(26); 4139 /* 4140 * For private segments SOFTLOCK 
4141 * either always breaks cow (any rw
4142 * type except S_READ_NOCOW) or
4143 * address space is locked as writer
4144 * (S_READ_NOCOW case) and anon slots
4145 * can't show up on second check.
4146 * Therefore if we are here for
4147 * SOFTLOCK case it must be a cow
4148 * break but cow break never reduces
4149 * szc. text replication (tron) in
4150 * this case works as cow break.
4151 * Thus the assert below.
4152 */
4153 ASSERT(!brkcow && !tron &&
4154 type != F_SOFTLOCK);
4155 pszc = seg->s_szc;
4156 ierr = -2;
4157 break;
4158 }
4159 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4160 goto again;
4161 }
4162 #ifdef DEBUG
4163 if (amp != NULL) {
4164 ulong_t taindx = P2ALIGN(aindx, maxpages);
4165 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4166 }
4167 #endif /* DEBUG */
4168
4169 if (brkcow || tron) {
4170 ASSERT(amp != NULL);
4171 ASSERT(pplist == NULL);
4172 ASSERT(szc == seg->s_szc);
4173 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4174 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4175 SEGVN_VMSTAT_FLTVNPAGES(27);
4176 ierr = anon_map_privatepages(amp, aindx, szc,
4177 seg, a, prot, ppa, vpage, segvn_anypgsz,
4178 tron ? PG_LOCAL : 0, svd->cred);
4179 if (ierr != 0) {
4180 SEGVN_VMSTAT_FLTVNPAGES(28);
4181 anon_array_exit(&an_cookie);
4182 ANON_LOCK_EXIT(&amp->a_rwlock);
4183 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4184 err = FC_MAKE_ERR(ierr);
4185 goto out;
4186 }
4187
4188 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4189 /*
4190 * p_szc can't be changed for locked
4191 * swapfs pages.
4192 */
4193 ASSERT(svd->rcookie ==
4194 HAT_INVALID_REGION_COOKIE);
4195 hat_memload_array(hat, a, pgsz, ppa, prot,
4196 hat_flag);
4197
4198 if (!(hat_flag & HAT_LOAD_LOCK)) {
4199 SEGVN_VMSTAT_FLTVNPAGES(29);
4200 for (i = 0; i < pages; i++) {
4201 page_unlock(ppa[i]);
4202 }
4203 }
4204 anon_array_exit(&an_cookie);
4205 ANON_LOCK_EXIT(&amp->a_rwlock);
4206 goto next;
4207 }
4208
4209 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4210 (!svd->pageprot && svd->prot == (prot & vpprot)));
4211
4212 pfn = page_pptonum(ppa[0]);
4213 /*
4214 * hat_page_demote() needs an SE_EXCL lock on one of
4215 * constituent page_t's and it decreases root's p_szc
4216 * last. This means if root's p_szc is equal szc and
4217 * all its constituent pages are locked
4218 * hat_page_demote() that could have changed p_szc to
4219 * szc is already done and no new hat_page_demote()
4220 * can start for this large page.
4221 */
4222
4223 /*
4224 * we need to make sure same mapping size is used for
4225 * the same address range if there's a possibility the
4226 * address is already mapped because hat layer panics
4227 * when translation is loaded for the range already
4228 * mapped with a different page size. We achieve it
4229 * by always using largest page size possible subject
4230 * to the constraints of page size, segment page size
4231 * and page alignment. Since mappings are invalidated
4232 * when those constraints change and make it
4233 * impossible to use previously used mapping size no
4234 * mapping size conflicts should happen.
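 *
 * A rough sketch of the constraint (illustrative only; the real
 * logic below handles it with explicit up/down-size retries rather
 * than a simple loop):
 *
 *	tszc = MIN(seg->s_szc, ppa[0]->p_szc);
 *	while (tszc != 0 &&
 *	    (!IS_P2ALIGNED(a, page_get_pagesize(tszc)) ||
 *	    !IS_P2ALIGNED(page_pptonum(ppa[0]),
 *	    btop(page_get_pagesize(tszc)))))
 *		tszc--;
 *
 * i.e. use the largest size allowed by both the segment and the
 * existing pages that also satisfies the virtual and physical
 * alignment constraints.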
4235 */
4236
4237 chkszc:
4238 if ((pszc = ppa[0]->p_szc) == szc &&
4239 IS_P2ALIGNED(pfn, pages)) {
4240
4241 SEGVN_VMSTAT_FLTVNPAGES(30);
4242 #ifdef DEBUG
4243 for (i = 0; i < pages; i++) {
4244 ASSERT(PAGE_LOCKED(ppa[i]));
4245 ASSERT(!PP_ISFREE(ppa[i]));
4246 ASSERT(page_pptonum(ppa[i]) ==
4247 pfn + i);
4248 ASSERT(ppa[i]->p_szc == szc);
4249 ASSERT(ppa[i]->p_vnode == vp);
4250 ASSERT(ppa[i]->p_offset ==
4251 off + (i << PAGESHIFT));
4252 }
4253 #endif /* DEBUG */
4254 /*
4255 * All pages are of szc we need and they are
4256 * all locked so they can't change szc. load
4257 * translations.
4258 *
4259 * if page got promoted since last check
4260 * we don't need pplist.
4261 */
4262 if (pplist != NULL) {
4263 page_free_replacement_page(pplist);
4264 page_create_putback(pages);
4265 }
4266 if (PP_ISMIGRATE(ppa[0])) {
4267 page_migrate(seg, a, ppa, pages);
4268 }
4269 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4270 prot, vpprot);
4271 hat_memload_array_region(hat, a, pgsz,
4272 ppa, prot & vpprot, hat_flag,
4273 svd->rcookie);
4274
4275 if (!(hat_flag & HAT_LOAD_LOCK)) {
4276 for (i = 0; i < pages; i++) {
4277 page_unlock(ppa[i]);
4278 }
4279 }
4280 if (amp != NULL) {
4281 anon_array_exit(&an_cookie);
4282 ANON_LOCK_EXIT(&amp->a_rwlock);
4283 }
4284 goto next;
4285 }
4286
4287 /*
4288 * See if upsize is possible.
4289 */
4290 if (pszc > szc && szc < seg->s_szc &&
4291 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4292 pgcnt_t aphase;
4293 uint_t pszc1 = MIN(pszc, seg->s_szc);
4294 ppgsz = page_get_pagesize(pszc1);
4295 ppages = btop(ppgsz);
4296 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4297
4298 ASSERT(type != F_SOFTLOCK);
4299
4300 SEGVN_VMSTAT_FLTVNPAGES(31);
4301 if (aphase != P2PHASE(pfn, ppages)) {
4302 segvn_faultvnmpss_align_err4++;
4303 } else {
4304 SEGVN_VMSTAT_FLTVNPAGES(32);
4305 if (pplist != NULL) {
4306 page_t *pl = pplist;
4307 page_free_replacement_page(pl);
4308 page_create_putback(pages);
4309 }
4310 for (i = 0; i < pages; i++) {
4311 page_unlock(ppa[i]);
4312 }
4313 if (amp != NULL) {
4314 anon_array_exit(&an_cookie);
4315 ANON_LOCK_EXIT(&amp->a_rwlock);
4316 }
4317 pszc = pszc1;
4318 ierr = -2;
4319 break;
4320 }
4321 }
4322
4323 /*
4324 * check if we should use smallest mapping size.
4325 */
4326 upgrdfail = 0;
4327 if (szc == 0 ||
4328 (pszc >= szc &&
4329 !IS_P2ALIGNED(pfn, pages)) ||
4330 (pszc < szc &&
4331 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4332 &pszc))) {
4333
4334 if (upgrdfail && type != F_SOFTLOCK) {
4335 /*
4336 * segvn_full_szcpages failed to lock
4337 * all pages EXCL. Size down.
4338 */
4339 ASSERT(pszc < szc);
4340
4341 SEGVN_VMSTAT_FLTVNPAGES(33);
4342
4343 if (pplist != NULL) {
4344 page_t *pl = pplist;
4345 page_free_replacement_page(pl);
4346 page_create_putback(pages);
4347 }
4348
4349 for (i = 0; i < pages; i++) {
4350 page_unlock(ppa[i]);
4351 }
4352 if (amp != NULL) {
4353 anon_array_exit(&an_cookie);
4354 ANON_LOCK_EXIT(&amp->a_rwlock);
4355 }
4356 ierr = -1;
4357 break;
4358 }
4359 if (szc != 0 && !upgrdfail) {
4360 segvn_faultvnmpss_align_err5++;
4361 }
4362 SEGVN_VMSTAT_FLTVNPAGES(34);
4363 if (pplist != NULL) {
4364 page_free_replacement_page(pplist);
4365 page_create_putback(pages);
4366 }
4367 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4368 prot, vpprot);
4369 if (upgrdfail && segvn_anypgsz_vnode) {
4370 /* SOFTLOCK case */
4371 hat_memload_array_region(hat, a, pgsz,
4372 ppa, prot & vpprot, hat_flag,
4373 svd->rcookie);
4374 } else {
4375 for (i = 0; i < pages; i++) {
4376 hat_memload_region(hat,
4377 a + (i << PAGESHIFT),
4378 ppa[i], prot & vpprot,
4379 hat_flag, svd->rcookie);
4380 }
4381 }
4382 if (!(hat_flag & HAT_LOAD_LOCK)) {
4383 for (i = 0; i < pages; i++) {
4384 page_unlock(ppa[i]);
4385 }
4386 }
4387 if (amp != NULL) {
4388 anon_array_exit(&an_cookie);
4389 ANON_LOCK_EXIT(&amp->a_rwlock);
4390 }
4391 goto next;
4392 }
4393
4394 if (pszc == szc) {
4395 /*
4396 * segvn_full_szcpages() upgraded pages szc.
4397 */
4398 ASSERT(pszc == ppa[0]->p_szc);
4399 ASSERT(IS_P2ALIGNED(pfn, pages));
4400 goto chkszc;
4401 }
4402
4403 if (pszc > szc) {
4404 kmutex_t *szcmtx;
4405 SEGVN_VMSTAT_FLTVNPAGES(35);
4406 /*
4407 * p_szc of ppa[0] can change since we haven't
4408 * locked all constituent pages. Call
4409 * page_lock_szc() to prevent szc changes.
4410 * This should be a rare case that happens when
4411 * multiple segments use a different page size
4412 * to map the same file offsets.
4413 */
4414 szcmtx = page_szc_lock(ppa[0]);
4415 pszc = ppa[0]->p_szc;
4416 ASSERT(szcmtx != NULL || pszc == 0);
4417 ASSERT(ppa[0]->p_szc <= pszc);
4418 if (pszc <= szc) {
4419 SEGVN_VMSTAT_FLTVNPAGES(36);
4420 if (szcmtx != NULL) {
4421 mutex_exit(szcmtx);
4422 }
4423 goto chkszc;
4424 }
4425 if (pplist != NULL) {
4426 /*
4427 * page got promoted since last check.
4428 * we don't need preallocated large
4429 * page.
4430 */
4431 SEGVN_VMSTAT_FLTVNPAGES(37);
4432 page_free_replacement_page(pplist);
4433 page_create_putback(pages);
4434 }
4435 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4436 prot, vpprot);
4437 hat_memload_array_region(hat, a, pgsz, ppa,
4438 prot & vpprot, hat_flag, svd->rcookie);
4439 mutex_exit(szcmtx);
4440 if (!(hat_flag & HAT_LOAD_LOCK)) {
4441 for (i = 0; i < pages; i++) {
4442 page_unlock(ppa[i]);
4443 }
4444 }
4445 if (amp != NULL) {
4446 anon_array_exit(&an_cookie);
4447 ANON_LOCK_EXIT(&amp->a_rwlock);
4448 }
4449 goto next;
4450 }
4451
4452 /*
4453 * if the page got demoted since the last check
4454 * we may not have allocated a larger page.
4455 * allocate one now.
4456 */
4457 if (pplist == NULL &&
4458 page_alloc_pages(vp, seg, a, &pplist, NULL,
4459 szc, 0, 0) && type != F_SOFTLOCK) {
4460 SEGVN_VMSTAT_FLTVNPAGES(38);
4461 for (i = 0; i < pages; i++) {
4462 page_unlock(ppa[i]);
4463 }
4464 if (amp != NULL) {
4465 anon_array_exit(&an_cookie);
4466 ANON_LOCK_EXIT(&amp->a_rwlock);
4467 }
4468 ierr = -1;
4469 alloc_failed |= (1 << szc);
4470 break;
4471 }
4472
4473 SEGVN_VMSTAT_FLTVNPAGES(39);
4474
4475 if (pplist != NULL) {
4476 segvn_relocate_pages(ppa, pplist);
4477 #ifdef DEBUG
4478 } else {
4479 ASSERT(type == F_SOFTLOCK);
4480 SEGVN_VMSTAT_FLTVNPAGES(40);
4481 #endif /* DEBUG */
4482 }
4483
4484 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4485
4486 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4487 ASSERT(type == F_SOFTLOCK);
4488 for (i = 0; i < pages; i++) {
4489 ASSERT(ppa[i]->p_szc < szc);
4490 hat_memload_region(hat,
4491 a + (i << PAGESHIFT),
4492 ppa[i], prot & vpprot, hat_flag,
4493 svd->rcookie);
4494 }
4495 } else {
4496 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4497 hat_memload_array_region(hat, a, pgsz, ppa,
4498 prot & vpprot, hat_flag, svd->rcookie);
4499 }
4500 if (!(hat_flag & HAT_LOAD_LOCK)) {
4501 for (i = 0; i < pages; i++) {
4502 ASSERT(PAGE_SHARED(ppa[i]));
4503 page_unlock(ppa[i]);
4504 }
4505 }
4506 if (amp != NULL) {
4507 anon_array_exit(&an_cookie);
4508 ANON_LOCK_EXIT(&amp->a_rwlock);
4509 }
4510
4511 next:
4512 if (vpage != NULL) {
4513 vpage += pages;
4514 }
4515 adjszc_chk = 1;
4516 }
4517 if (a == lpgeaddr)
4518 break;
4519 ASSERT(a < lpgeaddr);
4520
4521 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4522
4523 /*
4524 * ierr == -1 means we failed to map with a large page.
4525 * (either due to allocation/relocation failures or
4526 * misalignment with other mappings to this file).
4527 *
4528 * ierr == -2 means some other thread allocated a large page
4529 * after we gave up to map with a large page. retry with
4530 * larger mapping.
4531 */
4532 ASSERT(ierr == -1 || ierr == -2);
4533 ASSERT(ierr == -2 || szc != 0);
4534 ASSERT(ierr == -1 || szc < seg->s_szc);
4535 if (ierr == -2) {
4536 SEGVN_VMSTAT_FLTVNPAGES(41);
4537 ASSERT(pszc > szc && pszc <= seg->s_szc);
4538 szc = pszc;
4539 } else if (segvn_anypgsz_vnode) {
4540 SEGVN_VMSTAT_FLTVNPAGES(42);
4541 szc--;
4542 } else {
4543 SEGVN_VMSTAT_FLTVNPAGES(43);
4544 ASSERT(pszc < szc);
4545 /*
4546 * other process created pszc large page.
4547 * but we still have to drop to 0 szc.
4548 */
4549 szc = 0;
4550 }
4551
4552 pgsz = page_get_pagesize(szc);
4553 pages = btop(pgsz);
4554 if (ierr == -2) {
4555 /*
4556 * Size up case. Note lpgaddr may only be needed for
4557 * softlock case so we don't adjust it here.
4558 */
4559 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4560 ASSERT(a >= lpgaddr);
4561 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4562 off = svd->offset + (uintptr_t)(a - seg->s_base);
4563 aindx = svd->anon_index + seg_page(seg, a);
4564 vpage = (svd->vpage != NULL) ?
4565 &svd->vpage[seg_page(seg, a)] : NULL;
4566 } else {
4567 /*
4568 * Size down case. Note lpgaddr may only be needed for
4569 * softlock case so we don't adjust it here.
4570 */
4571 ASSERT(IS_P2ALIGNED(a, pgsz));
4572 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4573 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4574 ASSERT(a < lpgeaddr);
4575 if (a < addr) {
4576 SEGVN_VMSTAT_FLTVNPAGES(44);
4577 /*
4578 * The beginning of the large page region can
4579 * be pulled to the right to make a smaller
4580 * region. We haven't yet faulted a single
4581 * page.
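 *
 * For example (hypothetical numbers): with lpgaddr at 0x400000,
 * the faulting addr at 0x41c000 and a post-size-down pgsz of
 * 0x10000, a becomes P2ALIGN(0x41c000, 0x10000) == 0x410000, so
 * the retry starts at the large page that actually contains the
 * fault instead of re-walking from lpgaddr.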
4582 */ 4583 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4584 ASSERT(a >= lpgaddr); 4585 off = svd->offset + 4586 (uintptr_t)(a - seg->s_base); 4587 aindx = svd->anon_index + seg_page(seg, a); 4588 vpage = (svd->vpage != NULL) ? 4589 &svd->vpage[seg_page(seg, a)] : NULL; 4590 } 4591 } 4592 } 4593 out: 4594 kmem_free(ppa, ppasize); 4595 if (!err && !vop_size_err) { 4596 SEGVN_VMSTAT_FLTVNPAGES(45); 4597 return (0); 4598 } 4599 if (type == F_SOFTLOCK && a > lpgaddr) { 4600 SEGVN_VMSTAT_FLTVNPAGES(46); 4601 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4602 } 4603 if (!vop_size_err) { 4604 SEGVN_VMSTAT_FLTVNPAGES(47); 4605 return (err); 4606 } 4607 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4608 /* 4609 * Large page end is mapped beyond the end of file and it's a cow 4610 * fault (can be a text replication induced cow) or softlock so we can't 4611 * reduce the map area. For now just demote the segment. This should 4612 * really only happen if the end of the file changed after the mapping 4613 * was established since when large page segments are created we make 4614 * sure they don't extend beyond the end of the file. 4615 */ 4616 SEGVN_VMSTAT_FLTVNPAGES(48); 4617 4618 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4619 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4620 err = 0; 4621 if (seg->s_szc != 0) { 4622 segvn_fltvnpages_clrszc_cnt++; 4623 ASSERT(svd->softlockcnt == 0); 4624 err = segvn_clrszc(seg); 4625 if (err != 0) { 4626 segvn_fltvnpages_clrszc_err++; 4627 } 4628 } 4629 ASSERT(err || seg->s_szc == 0); 4630 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4631 /* segvn_fault will do its job as if szc had been zero to begin with */ 4632 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4633 } 4634 4635 /* 4636 * This routine will attempt to fault in one large page. 4637 * it will use smaller pages if that fails. 4638 * It should only be called for pure anonymous segments. 4639 */ 4640 static faultcode_t 4641 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4642 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4643 caddr_t eaddr, int brkcow) 4644 { 4645 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4646 struct anon_map *amp = svd->amp; 4647 uchar_t segtype = svd->type; 4648 uint_t szc = seg->s_szc; 4649 size_t pgsz = page_get_pagesize(szc); 4650 size_t maxpgsz = pgsz; 4651 pgcnt_t pages = btop(pgsz); 4652 uint_t ppaszc = szc; 4653 caddr_t a = lpgaddr; 4654 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4655 struct vpage *vpage = (svd->vpage != NULL) ? 4656 &svd->vpage[seg_page(seg, a)] : NULL; 4657 page_t **ppa; 4658 uint_t ppa_szc; 4659 faultcode_t err; 4660 int ierr; 4661 uint_t protchk, prot, vpprot; 4662 ulong_t i; 4663 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4664 anon_sync_obj_t cookie; 4665 int adjszc_chk; 4666 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4667 4668 ASSERT(szc != 0); 4669 ASSERT(amp != NULL); 4670 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4671 ASSERT(!(svd->flags & MAP_NORESERVE)); 4672 ASSERT(type != F_SOFTUNLOCK); 4673 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4674 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4675 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4676 4677 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4678 4679 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4680 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4681 4682 if (svd->flags & MAP_TEXT) { 4683 hat_flag |= HAT_LOAD_TEXT; 4684 } 4685 4686 if (svd->pageprot) { 4687 prot = PROT_NONE; 4688 switch (rw) { 4689 case S_READ: 4690 protchk = PROT_READ; 4691 break; 4692 case S_WRITE: 4693 protchk = PROT_WRITE; 4694 break; 4695 case S_EXEC: 4696 protchk = PROT_EXEC; 4697 break; 4698 case S_OTHER: 4699 default: 4700 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4701 break; 4702 } 4703 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4704 } else { 4705 protchk = PROT_NONE; 4706 prot = svd->prot; 4707 /* caller has already done segment level protection check. */ 4708 } 4709 4710 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4711 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4712 ierr = 0; 4713 for (;;) { 4714 adjszc_chk = 0; 4715 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4716 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4717 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4718 ASSERT(vpage != NULL); 4719 prot = VPP_PROT(vpage); 4720 ASSERT(sameprot(seg, a, maxpgsz)); 4721 if ((prot & protchk) == 0) { 4722 err = FC_PROT; 4723 goto error; 4724 } 4725 } 4726 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4727 pgsz < maxpgsz) { 4728 ASSERT(a > lpgaddr); 4729 szc = seg->s_szc; 4730 pgsz = maxpgsz; 4731 pages = btop(pgsz); 4732 ASSERT(IS_P2ALIGNED(aindx, pages)); 4733 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4734 pgsz); 4735 } 4736 if (type == F_SOFTLOCK) { 4737 atomic_add_long((ulong_t *)&svd->softlockcnt, 4738 pages); 4739 } 4740 anon_array_enter(amp, aindx, &cookie); 4741 ppa_szc = (uint_t)-1; 4742 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4743 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4744 segvn_anypgsz, pgflags, svd->cred); 4745 if (ierr != 0) { 4746 anon_array_exit(&cookie); 4747 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4748 if (type == F_SOFTLOCK) { 4749 atomic_add_long( 4750 (ulong_t *)&svd->softlockcnt, 4751 -pages); 4752 } 4753 if (ierr > 0) { 4754 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4755 err = FC_MAKE_ERR(ierr); 4756 goto error; 4757 } 4758 break; 4759 } 4760 4761 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4762 4763 ASSERT(segtype == MAP_SHARED || 4764 ppa[0]->p_szc <= szc); 4765 ASSERT(segtype == MAP_PRIVATE || 4766 ppa[0]->p_szc >= szc); 4767 4768 /* 4769 * Handle pages that have been marked for migration 4770 */ 4771 if (lgrp_optimizations()) 4772 page_migrate(seg, a, ppa, pages); 4773 4774 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4775 4776 if (segtype == MAP_SHARED) { 4777 vpprot |= PROT_WRITE; 4778 } 4779 4780 hat_memload_array(hat, a, pgsz, ppa, 4781 prot & vpprot, hat_flag); 4782 4783 if (hat_flag & HAT_LOAD_LOCK) { 4784 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4785 } else { 4786 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4787 for (i = 0; i < pages; i++) 4788 page_unlock(ppa[i]); 4789 } 4790 if (vpage != NULL) 4791 vpage += pages; 4792 4793 anon_array_exit(&cookie); 4794 adjszc_chk = 1; 4795 } 4796 if (a == lpgeaddr) 4797 break; 4798 ASSERT(a 
< lpgeaddr); 4799 /* 4800 * ierr == -1 means we failed to allocate a large page. 4801 * so do a size down operation. 4802 * 4803 * ierr == -2 means some other process that privately shares 4804 * pages with this process has allocated a larger page and we 4805 * need to retry with larger pages. So do a size up 4806 * operation. This relies on the fact that large pages are 4807 * never partially shared i.e. if we share any constituent 4808 * page of a large page with another process we must share the 4809 * entire large page. Note this cannot happen for SOFTLOCK 4810 * case, unless current address (a) is at the beginning of the 4811 * next page size boundary because the other process couldn't 4812 * have relocated locked pages. 4813 */ 4814 ASSERT(ierr == -1 || ierr == -2); 4815 4816 if (segvn_anypgsz) { 4817 ASSERT(ierr == -2 || szc != 0); 4818 ASSERT(ierr == -1 || szc < seg->s_szc); 4819 szc = (ierr == -1) ? szc - 1 : szc + 1; 4820 } else { 4821 /* 4822 * For non COW faults and segvn_anypgsz == 0 4823 * we need to be careful not to loop forever 4824 * if existing page is found with szc other 4825 * than 0 or seg->s_szc. This could be due 4826 * to page relocations on behalf of DR or 4827 * more likely large page creation. For this 4828 * case simply re-size to existing page's szc 4829 * if returned by anon_map_getpages(). 4830 */ 4831 if (ppa_szc == (uint_t)-1) { 4832 szc = (ierr == -1) ? 0 : seg->s_szc; 4833 } else { 4834 ASSERT(ppa_szc <= seg->s_szc); 4835 ASSERT(ierr == -2 || ppa_szc < szc); 4836 ASSERT(ierr == -1 || ppa_szc > szc); 4837 szc = ppa_szc; 4838 } 4839 } 4840 4841 pgsz = page_get_pagesize(szc); 4842 pages = btop(pgsz); 4843 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4844 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4845 if (type == F_SOFTLOCK) { 4846 /* 4847 * For softlocks we cannot reduce the fault area 4848 * (calculated based on the largest page size for this 4849 * segment) for size down and a is already next 4850 * page size aligned as assertted above for size 4851 * ups. Therefore just continue in case of softlock. 4852 */ 4853 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4854 continue; /* keep lint happy */ 4855 } else if (ierr == -2) { 4856 4857 /* 4858 * Size up case. Note lpgaddr may only be needed for 4859 * softlock case so we don't adjust it here. 4860 */ 4861 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4862 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4863 ASSERT(a >= lpgaddr); 4864 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4865 aindx = svd->anon_index + seg_page(seg, a); 4866 vpage = (svd->vpage != NULL) ? 4867 &svd->vpage[seg_page(seg, a)] : NULL; 4868 } else { 4869 /* 4870 * Size down case. Note lpgaddr may only be needed for 4871 * softlock case so we don't adjust it here. 4872 */ 4873 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4874 ASSERT(IS_P2ALIGNED(a, pgsz)); 4875 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4876 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4877 ASSERT(a < lpgeaddr); 4878 if (a < addr) { 4879 /* 4880 * The beginning of the large page region can 4881 * be pulled to the right to make a smaller 4882 * region. We haven't yet faulted a single 4883 * page. 4884 */ 4885 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4886 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4887 ASSERT(a >= lpgaddr); 4888 aindx = svd->anon_index + seg_page(seg, a); 4889 vpage = (svd->vpage != NULL) ? 
4890 &svd->vpage[seg_page(seg, a)] : NULL; 4891 } 4892 } 4893 } 4894 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4895 ANON_LOCK_EXIT(&->a_rwlock); 4896 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4897 return (0); 4898 error: 4899 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4900 ANON_LOCK_EXIT(&->a_rwlock); 4901 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4902 if (type == F_SOFTLOCK && a > lpgaddr) { 4903 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4904 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4905 } 4906 return (err); 4907 } 4908 4909 int fltadvice = 1; /* set to free behind pages for sequential access */ 4910 4911 /* 4912 * This routine is called via a machine specific fault handling routine. 4913 * It is also called by software routines wishing to lock or unlock 4914 * a range of addresses. 4915 * 4916 * Here is the basic algorithm: 4917 * If unlocking 4918 * Call segvn_softunlock 4919 * Return 4920 * endif 4921 * Checking and set up work 4922 * If we will need some non-anonymous pages 4923 * Call VOP_GETPAGE over the range of non-anonymous pages 4924 * endif 4925 * Loop over all addresses requested 4926 * Call segvn_faultpage passing in page list 4927 * to load up translations and handle anonymous pages 4928 * endloop 4929 * Load up translation to any additional pages in page list not 4930 * already handled that fit into this segment 4931 */ 4932 static faultcode_t 4933 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4934 enum fault_type type, enum seg_rw rw) 4935 { 4936 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4937 page_t **plp, **ppp, *pp; 4938 u_offset_t off; 4939 caddr_t a; 4940 struct vpage *vpage; 4941 uint_t vpprot, prot = 0; 4942 int err; 4943 page_t *pl[PVN_GETPAGE_NUM + 1]; 4944 size_t plsz, pl_alloc_sz; 4945 size_t page; 4946 ulong_t anon_index = 0; 4947 struct anon_map *amp; 4948 int dogetpage = 0; 4949 caddr_t lpgaddr, lpgeaddr; 4950 size_t pgsz; 4951 anon_sync_obj_t cookie; 4952 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4953 4954 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 4955 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4956 4957 /* 4958 * First handle the easy stuff 4959 */ 4960 if (type == F_SOFTUNLOCK) { 4961 if (rw == S_READ_NOCOW) { 4962 rw = S_READ; 4963 ASSERT(AS_WRITE_HELD(seg->s_as)); 4964 } 4965 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4966 pgsz = (seg->s_szc == 0) ? 
PAGESIZE : 4967 page_get_pagesize(seg->s_szc); 4968 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4969 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4970 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4971 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4972 return (0); 4973 } 4974 4975 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4976 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4977 if (brkcow == 0) { 4978 if (svd->tr_state == SEGVN_TR_INIT) { 4979 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4980 if (svd->tr_state == SEGVN_TR_INIT) { 4981 ASSERT(svd->vp != NULL && svd->amp == NULL); 4982 ASSERT(svd->flags & MAP_TEXT); 4983 ASSERT(svd->type == MAP_PRIVATE); 4984 segvn_textrepl(seg); 4985 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4986 ASSERT(svd->tr_state != SEGVN_TR_ON || 4987 svd->amp != NULL); 4988 } 4989 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4990 } 4991 } else if (svd->tr_state != SEGVN_TR_OFF) { 4992 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4993 4994 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4995 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4996 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4997 return (FC_PROT); 4998 } 4999 5000 if (svd->tr_state == SEGVN_TR_ON) { 5001 ASSERT(svd->vp != NULL && svd->amp != NULL); 5002 segvn_textunrepl(seg, 0); 5003 ASSERT(svd->amp == NULL && 5004 svd->tr_state == SEGVN_TR_OFF); 5005 } else if (svd->tr_state != SEGVN_TR_OFF) { 5006 svd->tr_state = SEGVN_TR_OFF; 5007 } 5008 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5009 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5010 } 5011 5012 top: 5013 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5014 5015 /* 5016 * If we have the same protections for the entire segment, 5017 * insure that the access being attempted is legitimate. 5018 */ 5019 5020 if (svd->pageprot == 0) { 5021 uint_t protchk; 5022 5023 switch (rw) { 5024 case S_READ: 5025 case S_READ_NOCOW: 5026 protchk = PROT_READ; 5027 break; 5028 case S_WRITE: 5029 protchk = PROT_WRITE; 5030 break; 5031 case S_EXEC: 5032 protchk = PROT_EXEC; 5033 break; 5034 case S_OTHER: 5035 default: 5036 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 5037 break; 5038 } 5039 5040 if ((svd->prot & protchk) == 0) { 5041 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5042 return (FC_PROT); /* illegal access type */ 5043 } 5044 } 5045 5046 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5047 /* this must be SOFTLOCK S_READ fault */ 5048 ASSERT(svd->amp == NULL); 5049 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5050 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5051 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5052 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5053 /* 5054 * this must be the first ever non S_READ_NOCOW 5055 * softlock for this segment. 5056 */ 5057 ASSERT(svd->softlockcnt == 0); 5058 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5059 HAT_REGION_TEXT); 5060 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5061 } 5062 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5063 goto top; 5064 } 5065 5066 /* 5067 * We can't allow the long term use of softlocks for vmpss segments, 5068 * because in some file truncation cases we should be able to demote 5069 * the segment, which requires that there are no softlocks. The 5070 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5071 * segment is S_READ_NOCOW, where the caller holds the address space 5072 * locked as writer and calls softunlock before dropping the as lock. 5073 * S_READ_NOCOW is used by /proc to read memory from another user. 
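 *
 * (Illustrative summary of the policy implemented further below:
 *
 *	demote = (rw != S_READ_NOCOW) ||
 *	    (len > PAGESIZE && lpgeaddr - lpgaddr > pgsz);
 *
 * the reasons behind the second clause are described next.)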
5074 * 5075 * Another deadlock between SOFTLOCK and file truncation can happen 5076 * because segvn_fault_vnodepages() calls the FS one pagesize at 5077 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5078 * can cause a deadlock because the first set of page_t's remain 5079 * locked SE_SHARED. To avoid this, we demote segments on a first 5080 * SOFTLOCK if they have a length greater than the segment's 5081 * page size. 5082 * 5083 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5084 * the access type is S_READ_NOCOW and the fault length is less than 5085 * or equal to the segment's page size. While this is quite restrictive, 5086 * it should be the most common case of SOFTLOCK against a vmpss 5087 * segment. 5088 * 5089 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5090 * caller makes sure no COW will be caused by another thread for a 5091 * softlocked page. 5092 */ 5093 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5094 int demote = 0; 5095 5096 if (rw != S_READ_NOCOW) { 5097 demote = 1; 5098 } 5099 if (!demote && len > PAGESIZE) { 5100 pgsz = page_get_pagesize(seg->s_szc); 5101 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5102 lpgeaddr); 5103 if (lpgeaddr - lpgaddr > pgsz) { 5104 demote = 1; 5105 } 5106 } 5107 5108 ASSERT(demote || AS_WRITE_HELD(seg->s_as)); 5109 5110 if (demote) { 5111 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5112 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5113 if (seg->s_szc != 0) { 5114 segvn_vmpss_clrszc_cnt++; 5115 ASSERT(svd->softlockcnt == 0); 5116 err = segvn_clrszc(seg); 5117 if (err) { 5118 segvn_vmpss_clrszc_err++; 5119 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5120 return (FC_MAKE_ERR(err)); 5121 } 5122 } 5123 ASSERT(seg->s_szc == 0); 5124 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5125 goto top; 5126 } 5127 } 5128 5129 /* 5130 * Check to see if we need to allocate an anon_map structure. 5131 */ 5132 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5133 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5134 /* 5135 * Drop the "read" lock on the segment and acquire 5136 * the "write" version since we have to allocate the 5137 * anon_map. 5138 */ 5139 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5140 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5141 5142 if (svd->amp == NULL) { 5143 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5144 svd->amp->a_szc = seg->s_szc; 5145 } 5146 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5147 5148 /* 5149 * Start all over again since segment protections 5150 * may have changed after we dropped the "read" lock. 5151 */ 5152 goto top; 5153 } 5154 5155 /* 5156 * S_READ_NOCOW vs S_READ distinction was 5157 * only needed for the code above. After 5158 * that we treat it as S_READ. 5159 */ 5160 if (rw == S_READ_NOCOW) { 5161 ASSERT(type == F_SOFTLOCK); 5162 ASSERT(AS_WRITE_HELD(seg->s_as)); 5163 rw = S_READ; 5164 } 5165 5166 amp = svd->amp; 5167 5168 /* 5169 * MADV_SEQUENTIAL work is ignored for large page segments. 
5170 */ 5171 if (seg->s_szc != 0) { 5172 pgsz = page_get_pagesize(seg->s_szc); 5173 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5174 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5175 if (svd->vp == NULL) { 5176 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5177 lpgeaddr, type, rw, addr, addr + len, brkcow); 5178 } else { 5179 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5180 lpgeaddr, type, rw, addr, addr + len, brkcow); 5181 if (err == IE_RETRY) { 5182 ASSERT(seg->s_szc == 0); 5183 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5184 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5185 goto top; 5186 } 5187 } 5188 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5189 return (err); 5190 } 5191 5192 page = seg_page(seg, addr); 5193 if (amp != NULL) { 5194 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5195 anon_index = svd->anon_index + page; 5196 5197 if (type == F_PROT && rw == S_READ && 5198 svd->tr_state == SEGVN_TR_OFF && 5199 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5200 size_t index = anon_index; 5201 struct anon *ap; 5202 5203 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5204 /* 5205 * The fast path could apply to S_WRITE also, except 5206 * that the protection fault could be caused by lazy 5207 * tlb flush when ro->rw. In this case, the pte is 5208 * RW already. But RO in the other cpu's tlb causes 5209 * the fault. Since hat_chgprot won't do anything if 5210 * pte doesn't change, we may end up faulting 5211 * indefinitely until the RO tlb entry gets replaced. 5212 */ 5213 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5214 anon_array_enter(amp, index, &cookie); 5215 ap = anon_get_ptr(amp->ahp, index); 5216 anon_array_exit(&cookie); 5217 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5218 ANON_LOCK_EXIT(&->a_rwlock); 5219 goto slow; 5220 } 5221 } 5222 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5223 ANON_LOCK_EXIT(&->a_rwlock); 5224 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5225 return (0); 5226 } 5227 } 5228 slow: 5229 5230 if (svd->vpage == NULL) 5231 vpage = NULL; 5232 else 5233 vpage = &svd->vpage[page]; 5234 5235 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5236 5237 /* 5238 * If MADV_SEQUENTIAL has been set for the particular page we 5239 * are faulting on, free behind all pages in the segment and put 5240 * them on the free list. 
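 *
 * A simplified sketch of the free-behind loop below (the real code
 * also translates anon pages to their swap <vp, offset>, honors
 * per-page advice and skips locked pages):
 *
 *	for (pgoff = off - PAGESIZE; pgoff > svd->offset;
 *	    pgoff -= PAGESIZE) {
 *		if ((pp = page_lookup_nowait(vp, pgoff,
 *		    SE_SHARED)) == NULL)
 *			break;
 *		page_unlock(pp);
 *		(void) VOP_PUTPAGE(vp, (offset_t)pgoff, PAGESIZE,
 *		    B_DONTNEED | B_FREE | B_ASYNC, svd->cred, NULL);
 *	}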
5241 */ 5242 5243 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5244 struct vpage *vpp = NULL; 5245 ulong_t fanon_index = 0; 5246 size_t fpage; 5247 u_offset_t pgoff, fpgoff; 5248 struct vnode *fvp; 5249 struct anon *fap = NULL; 5250 5251 if (svd->advice == MADV_SEQUENTIAL || 5252 (svd->pageadvice && 5253 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5254 pgoff = off - PAGESIZE; 5255 fpage = page - 1; 5256 if (vpage != NULL) 5257 vpp = &svd->vpage[fpage]; 5258 if (amp != NULL) 5259 fanon_index = svd->anon_index + fpage; 5260 5261 while (pgoff > svd->offset) { 5262 if (svd->advice != MADV_SEQUENTIAL && 5263 (!svd->pageadvice || (vpage && 5264 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5265 break; 5266 5267 /* 5268 * If this is an anon page, we must find the 5269 * correct <vp, offset> for it 5270 */ 5271 fap = NULL; 5272 if (amp != NULL) { 5273 ANON_LOCK_ENTER(&->a_rwlock, 5274 RW_READER); 5275 anon_array_enter(amp, fanon_index, 5276 &cookie); 5277 fap = anon_get_ptr(amp->ahp, 5278 fanon_index); 5279 if (fap != NULL) { 5280 swap_xlate(fap, &fvp, &fpgoff); 5281 } else { 5282 fpgoff = pgoff; 5283 fvp = svd->vp; 5284 } 5285 anon_array_exit(&cookie); 5286 ANON_LOCK_EXIT(&->a_rwlock); 5287 } else { 5288 fpgoff = pgoff; 5289 fvp = svd->vp; 5290 } 5291 if (fvp == NULL) 5292 break; /* XXX */ 5293 /* 5294 * Skip pages that are free or have an 5295 * "exclusive" lock. 5296 */ 5297 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5298 if (pp == NULL) 5299 break; 5300 /* 5301 * We don't need the page_struct_lock to test 5302 * as this is only advisory; even if we 5303 * acquire it someone might race in and lock 5304 * the page after we unlock and before the 5305 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5306 */ 5307 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5308 /* 5309 * Hold the vnode before releasing 5310 * the page lock to prevent it from 5311 * being freed and re-used by some 5312 * other thread. 5313 */ 5314 VN_HOLD(fvp); 5315 page_unlock(pp); 5316 /* 5317 * We should build a page list 5318 * to kluster putpages XXX 5319 */ 5320 (void) VOP_PUTPAGE(fvp, 5321 (offset_t)fpgoff, PAGESIZE, 5322 (B_DONTNEED|B_FREE|B_ASYNC), 5323 svd->cred, NULL); 5324 VN_RELE(fvp); 5325 } else { 5326 /* 5327 * XXX - Should the loop terminate if 5328 * the page is `locked'? 5329 */ 5330 page_unlock(pp); 5331 } 5332 --vpp; 5333 --fanon_index; 5334 pgoff -= PAGESIZE; 5335 } 5336 } 5337 } 5338 5339 plp = pl; 5340 *plp = NULL; 5341 pl_alloc_sz = 0; 5342 5343 /* 5344 * See if we need to call VOP_GETPAGE for 5345 * *any* of the range being faulted on. 5346 * We can skip all of this work if there 5347 * was no original vnode. 5348 */ 5349 if (svd->vp != NULL) { 5350 u_offset_t vp_off; 5351 size_t vp_len; 5352 struct anon *ap; 5353 vnode_t *vp; 5354 5355 vp_off = off; 5356 vp_len = len; 5357 5358 if (amp == NULL) 5359 dogetpage = 1; 5360 else { 5361 /* 5362 * Only acquire reader lock to prevent amp->ahp 5363 * from being changed. 
It's ok to miss pages, 5364 * hence we don't do anon_array_enter 5365 */ 5366 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5367 ap = anon_get_ptr(amp->ahp, anon_index); 5368 5369 if (len <= PAGESIZE) 5370 /* inline non_anon() */ 5371 dogetpage = (ap == NULL); 5372 else 5373 dogetpage = non_anon(amp->ahp, anon_index, 5374 &vp_off, &vp_len); 5375 ANON_LOCK_EXIT(&->a_rwlock); 5376 } 5377 5378 if (dogetpage) { 5379 enum seg_rw arw; 5380 struct as *as = seg->s_as; 5381 5382 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5383 /* 5384 * Page list won't fit in local array, 5385 * allocate one of the needed size. 5386 */ 5387 pl_alloc_sz = 5388 (btop(len) + 1) * sizeof (page_t *); 5389 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5390 plp[0] = NULL; 5391 plsz = len; 5392 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5393 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5394 (((size_t)(addr + PAGESIZE) < 5395 (size_t)(seg->s_base + seg->s_size)) && 5396 hat_probe(as->a_hat, addr + PAGESIZE))) { 5397 /* 5398 * Ask VOP_GETPAGE to return the exact number 5399 * of pages if 5400 * (a) this is a COW fault, or 5401 * (b) this is a software fault, or 5402 * (c) next page is already mapped. 5403 */ 5404 plsz = len; 5405 } else { 5406 /* 5407 * Ask VOP_GETPAGE to return adjacent pages 5408 * within the segment. 5409 */ 5410 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5411 ((seg->s_base + seg->s_size) - addr)); 5412 ASSERT((addr + plsz) <= 5413 (seg->s_base + seg->s_size)); 5414 } 5415 5416 /* 5417 * Need to get some non-anonymous pages. 5418 * We need to make only one call to GETPAGE to do 5419 * this to prevent certain deadlocking conditions 5420 * when we are doing locking. In this case 5421 * non_anon() should have picked up the smallest 5422 * range which includes all the non-anonymous 5423 * pages in the requested range. We have to 5424 * be careful regarding which rw flag to pass in 5425 * because on a private mapping, the underlying 5426 * object is never allowed to be written. 5427 */ 5428 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5429 arw = S_READ; 5430 } else { 5431 arw = rw; 5432 } 5433 vp = svd->vp; 5434 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5435 "segvn_getpage:seg %p addr %p vp %p", 5436 seg, addr, vp); 5437 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5438 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5439 svd->cred, NULL); 5440 if (err) { 5441 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5442 segvn_pagelist_rele(plp); 5443 if (pl_alloc_sz) 5444 kmem_free(plp, pl_alloc_sz); 5445 return (FC_MAKE_ERR(err)); 5446 } 5447 if (svd->type == MAP_PRIVATE) 5448 vpprot &= ~PROT_WRITE; 5449 } 5450 } 5451 5452 /* 5453 * N.B. at this time the plp array has all the needed non-anon 5454 * pages in addition to (possibly) having some adjacent pages. 5455 */ 5456 5457 /* 5458 * Always acquire the anon_array_lock to prevent 5459 * 2 threads from allocating separate anon slots for 5460 * the same "addr". 5461 * 5462 * If this is a copy-on-write fault and we don't already 5463 * have the anon_array_lock, acquire it to prevent the 5464 * fault routine from handling multiple copy-on-write faults 5465 * on the same "addr" in the same address space. 5466 * 5467 * Only one thread should deal with the fault since after 5468 * it is handled, the other threads can acquire a translation 5469 * to the newly created private page. This prevents two or 5470 * more threads from creating different private pages for the 5471 * same fault. 
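 *
 * Hypothetical interleaving this serialization rules out
 * (illustration only):
 *
 *	T1: anon_get_ptr(amp->ahp, idx) == NULL
 *	T2: anon_get_ptr(amp->ahp, idx) == NULL
 *	T1: allocates a private page, anon_set_ptr(..., idx, ap1)
 *	T2: allocates a private page, anon_set_ptr(..., idx, ap2)
 *
 * which would leave two different private copies (and possibly a
 * stale translation) for a single faulting address.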
5472 * 5473 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5474 * to prevent deadlock between this thread and another thread 5475 * which has soft-locked this page and wants to acquire serial_lock. 5476 * ( bug 4026339 ) 5477 * 5478 * The fix for bug 4026339 becomes unnecessary when using the 5479 * locking scheme with per amp rwlock and a global set of hash 5480 * lock, anon_array_lock. If we steal a vnode page when low 5481 * on memory and upgrad the page lock through page_rename, 5482 * then the page is PAGE_HANDLED, nothing needs to be done 5483 * for this page after returning from segvn_faultpage. 5484 * 5485 * But really, the page lock should be downgraded after 5486 * the stolen page is page_rename'd. 5487 */ 5488 5489 if (amp != NULL) 5490 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5491 5492 /* 5493 * Ok, now loop over the address range and handle faults 5494 */ 5495 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5496 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5497 type, rw, brkcow); 5498 if (err) { 5499 if (amp != NULL) 5500 ANON_LOCK_EXIT(&->a_rwlock); 5501 if (type == F_SOFTLOCK && a > addr) { 5502 segvn_softunlock(seg, addr, (a - addr), 5503 S_OTHER); 5504 } 5505 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5506 segvn_pagelist_rele(plp); 5507 if (pl_alloc_sz) 5508 kmem_free(plp, pl_alloc_sz); 5509 return (err); 5510 } 5511 if (vpage) { 5512 vpage++; 5513 } else if (svd->vpage) { 5514 page = seg_page(seg, addr); 5515 vpage = &svd->vpage[++page]; 5516 } 5517 } 5518 5519 /* Didn't get pages from the underlying fs so we're done */ 5520 if (!dogetpage) 5521 goto done; 5522 5523 /* 5524 * Now handle any other pages in the list returned. 5525 * If the page can be used, load up the translations now. 5526 * Note that the for loop will only be entered if "plp" 5527 * is pointing to a non-NULL page pointer which means that 5528 * VOP_GETPAGE() was called and vpprot has been initialized. 5529 */ 5530 if (svd->pageprot == 0) 5531 prot = svd->prot & vpprot; 5532 5533 5534 /* 5535 * Large Files: diff should be unsigned value because we started 5536 * supporting > 2GB segment sizes from 2.5.1 and when a 5537 * large file of size > 2GB gets mapped to address space 5538 * the diff value can be > 2GB. 5539 */ 5540 5541 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5542 size_t diff; 5543 struct anon *ap; 5544 int anon_index; 5545 anon_sync_obj_t cookie; 5546 int hat_flag = HAT_LOAD_ADV; 5547 5548 if (svd->flags & MAP_TEXT) { 5549 hat_flag |= HAT_LOAD_TEXT; 5550 } 5551 5552 if (pp == PAGE_HANDLED) 5553 continue; 5554 5555 if (svd->tr_state != SEGVN_TR_ON && 5556 pp->p_offset >= svd->offset && 5557 pp->p_offset < svd->offset + seg->s_size) { 5558 5559 diff = pp->p_offset - svd->offset; 5560 5561 /* 5562 * Large Files: Following is the assertion 5563 * validating the above cast. 5564 */ 5565 ASSERT(svd->vp == pp->p_vnode); 5566 5567 page = btop(diff); 5568 if (svd->pageprot) 5569 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5570 5571 /* 5572 * Prevent other threads in the address space from 5573 * creating private pages (i.e., allocating anon slots) 5574 * while we are in the process of loading translations 5575 * to additional pages returned by the underlying 5576 * object. 
5577 */ 5578 if (amp != NULL) { 5579 anon_index = svd->anon_index + page; 5580 anon_array_enter(amp, anon_index, &cookie); 5581 ap = anon_get_ptr(amp->ahp, anon_index); 5582 } 5583 if ((amp == NULL) || (ap == NULL)) { 5584 if (IS_VMODSORT(pp->p_vnode) || 5585 enable_mbit_wa) { 5586 if (rw == S_WRITE) 5587 hat_setmod(pp); 5588 else if (rw != S_OTHER && 5589 !hat_ismod(pp)) 5590 prot &= ~PROT_WRITE; 5591 } 5592 /* 5593 * Skip mapping read ahead pages marked 5594 * for migration, so they will get migrated 5595 * properly on fault 5596 */ 5597 ASSERT(amp == NULL || 5598 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5599 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5600 hat_memload_region(hat, 5601 seg->s_base + diff, 5602 pp, prot, hat_flag, 5603 svd->rcookie); 5604 } 5605 } 5606 if (amp != NULL) 5607 anon_array_exit(&cookie); 5608 } 5609 page_unlock(pp); 5610 } 5611 done: 5612 if (amp != NULL) 5613 ANON_LOCK_EXIT(&->a_rwlock); 5614 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5615 if (pl_alloc_sz) 5616 kmem_free(plp, pl_alloc_sz); 5617 return (0); 5618 } 5619 5620 /* 5621 * This routine is used to start I/O on pages asynchronously. XXX it will 5622 * only create PAGESIZE pages. At fault time they will be relocated into 5623 * larger pages. 5624 */ 5625 static faultcode_t 5626 segvn_faulta(struct seg *seg, caddr_t addr) 5627 { 5628 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5629 int err; 5630 struct anon_map *amp; 5631 vnode_t *vp; 5632 5633 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 5634 5635 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5636 if ((amp = svd->amp) != NULL) { 5637 struct anon *ap; 5638 5639 /* 5640 * Reader lock to prevent amp->ahp from being changed. 5641 * This is advisory, it's ok to miss a page, so 5642 * we don't do anon_array_enter lock. 
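 *
 * (For context, and only as a sketch of the caller side: advice
 * such as MADV_WILLNEED is typically translated into as_faulta()
 * calls over the range, which end up here so that the read is
 * merely started; the page is picked up later by the normal
 * fault path.)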
5643 */ 5644 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5645 if ((ap = anon_get_ptr(amp->ahp, 5646 svd->anon_index + seg_page(seg, addr))) != NULL) { 5647 5648 err = anon_getpage(&ap, NULL, NULL, 5649 0, seg, addr, S_READ, svd->cred); 5650 5651 ANON_LOCK_EXIT(&->a_rwlock); 5652 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5653 if (err) 5654 return (FC_MAKE_ERR(err)); 5655 return (0); 5656 } 5657 ANON_LOCK_EXIT(&->a_rwlock); 5658 } 5659 5660 if (svd->vp == NULL) { 5661 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5662 return (0); /* zfod page - do nothing now */ 5663 } 5664 5665 vp = svd->vp; 5666 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5667 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5668 err = VOP_GETPAGE(vp, 5669 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5670 PAGESIZE, NULL, NULL, 0, seg, addr, 5671 S_OTHER, svd->cred, NULL); 5672 5673 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5674 if (err) 5675 return (FC_MAKE_ERR(err)); 5676 return (0); 5677 } 5678 5679 static int 5680 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5681 { 5682 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5683 struct vpage *cvp, *svp = NULL, *evp = NULL; 5684 struct vnode *vp; 5685 size_t pgsz; 5686 pgcnt_t pgcnt = 0; 5687 anon_sync_obj_t cookie; 5688 int unload_done = 0; 5689 5690 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 5691 5692 if ((svd->maxprot & prot) != prot) 5693 return (EACCES); /* violated maxprot */ 5694 5695 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5696 5697 /* return if prot is the same */ 5698 if (!svd->pageprot && svd->prot == prot) { 5699 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5700 return (0); 5701 } 5702 5703 /* 5704 * Since we change protections we first have to flush the cache. 5705 * This makes sure all the pagelock calls have to recheck 5706 * protections. 5707 */ 5708 if (svd->softlockcnt > 0) { 5709 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5710 5711 /* 5712 * If this is shared segment non 0 softlockcnt 5713 * means locked pages are still in use. 5714 */ 5715 if (svd->type == MAP_SHARED) { 5716 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5717 return (EAGAIN); 5718 } 5719 5720 /* 5721 * Since we do have the segvn writers lock nobody can fill 5722 * the cache with entries belonging to this seg during 5723 * the purge. The flush either succeeds or we still have 5724 * pending I/Os. 
5725 */ 5726 segvn_purge(seg); 5727 if (svd->softlockcnt > 0) { 5728 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5729 return (EAGAIN); 5730 } 5731 } 5732 5733 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5734 ASSERT(svd->amp == NULL); 5735 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5736 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5737 HAT_REGION_TEXT); 5738 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5739 unload_done = 1; 5740 } else if (svd->tr_state == SEGVN_TR_INIT) { 5741 svd->tr_state = SEGVN_TR_OFF; 5742 } else if (svd->tr_state == SEGVN_TR_ON) { 5743 ASSERT(svd->amp != NULL); 5744 segvn_textunrepl(seg, 0); 5745 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5746 unload_done = 1; 5747 } 5748 5749 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5750 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5751 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5752 segvn_inval_trcache(svd->vp); 5753 } 5754 if (seg->s_szc != 0) { 5755 int err; 5756 pgsz = page_get_pagesize(seg->s_szc); 5757 pgcnt = pgsz >> PAGESHIFT; 5758 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5759 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5760 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5761 ASSERT(seg->s_base != addr || seg->s_size != len); 5762 /* 5763 * If we are holding the as lock as a reader then 5764 * we need to return IE_RETRY and let the as 5765 * layer drop and re-acquire the lock as a writer. 5766 */ 5767 if (AS_READ_HELD(seg->s_as)) 5768 return (IE_RETRY); 5769 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5770 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5771 err = segvn_demote_range(seg, addr, len, 5772 SDR_END, 0); 5773 } else { 5774 uint_t szcvec = map_pgszcvec(seg->s_base, 5775 pgsz, (uintptr_t)seg->s_base, 5776 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5777 err = segvn_demote_range(seg, addr, len, 5778 SDR_END, szcvec); 5779 } 5780 if (err == 0) 5781 return (IE_RETRY); 5782 if (err == ENOMEM) 5783 return (IE_NOMEM); 5784 return (err); 5785 } 5786 } 5787 5788 5789 /* 5790 * If it's a private mapping and we're making it writable then we 5791 * may have to reserve the additional swap space now. If we are 5792 * making writable only a part of the segment then we use its vpage 5793 * array to keep a record of the pages for which we have reserved 5794 * swap. In this case we set the pageswap field in the segment's 5795 * segvn structure to record this. 5796 * 5797 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5798 * removing write permission on the entire segment and we haven't 5799 * modified any pages, we can release the swap space. 5800 */ 5801 if (svd->type == MAP_PRIVATE) { 5802 if (prot & PROT_WRITE) { 5803 if (!(svd->flags & MAP_NORESERVE) && 5804 !(svd->swresv && svd->pageswap == 0)) { 5805 size_t sz = 0; 5806 5807 /* 5808 * Start by determining how much swap 5809 * space is required. 5810 */ 5811 if (addr == seg->s_base && 5812 len == seg->s_size && 5813 svd->pageswap == 0) { 5814 /* The whole segment */ 5815 sz = seg->s_size; 5816 } else { 5817 /* 5818 * Make sure that the vpage array 5819 * exists, and make a note of the 5820 * range of elements corresponding 5821 * to len. 
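 *
 * Worked example (hypothetical numbers): if 16 pages are being
 * made writable and 10 of them already have VPP_ISSWAPRES() set
 * from an earlier call, only the other 6 are counted, so
 * sz == (6 << PAGESHIFT) bytes of swap are reserved below.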
5822 */ 5823 segvn_vpage(seg); 5824 if (svd->vpage == NULL) { 5825 SEGVN_LOCK_EXIT(seg->s_as, 5826 &svd->lock); 5827 return (ENOMEM); 5828 } 5829 svp = &svd->vpage[seg_page(seg, addr)]; 5830 evp = &svd->vpage[seg_page(seg, 5831 addr + len)]; 5832 5833 if (svd->pageswap == 0) { 5834 /* 5835 * This is the first time we've 5836 * asked for a part of this 5837 * segment, so we need to 5838 * reserve everything we've 5839 * been asked for. 5840 */ 5841 sz = len; 5842 } else { 5843 /* 5844 * We have to count the number 5845 * of pages required. 5846 */ 5847 for (cvp = svp; cvp < evp; 5848 cvp++) { 5849 if (!VPP_ISSWAPRES(cvp)) 5850 sz++; 5851 } 5852 sz <<= PAGESHIFT; 5853 } 5854 } 5855 5856 /* Try to reserve the necessary swap. */ 5857 if (anon_resv_zone(sz, 5858 seg->s_as->a_proc->p_zone) == 0) { 5859 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5860 return (IE_NOMEM); 5861 } 5862 5863 /* 5864 * Make a note of how much swap space 5865 * we've reserved. 5866 */ 5867 if (svd->pageswap == 0 && sz == seg->s_size) { 5868 svd->swresv = sz; 5869 } else { 5870 ASSERT(svd->vpage != NULL); 5871 svd->swresv += sz; 5872 svd->pageswap = 1; 5873 for (cvp = svp; cvp < evp; cvp++) { 5874 if (!VPP_ISSWAPRES(cvp)) 5875 VPP_SETSWAPRES(cvp); 5876 } 5877 } 5878 } 5879 } else { 5880 /* 5881 * Swap space is released only if this segment 5882 * does not map anonymous memory, since read faults 5883 * on such segments still need an anon slot to read 5884 * in the data. 5885 */ 5886 if (svd->swresv != 0 && svd->vp != NULL && 5887 svd->amp == NULL && addr == seg->s_base && 5888 len == seg->s_size && svd->pageprot == 0) { 5889 ASSERT(svd->pageswap == 0); 5890 anon_unresv_zone(svd->swresv, 5891 seg->s_as->a_proc->p_zone); 5892 svd->swresv = 0; 5893 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5894 "anon proc:%p %lu %u", seg, 0, 0); 5895 } 5896 } 5897 } 5898 5899 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5900 if (svd->prot == prot) { 5901 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5902 return (0); /* all done */ 5903 } 5904 svd->prot = (uchar_t)prot; 5905 } else if (svd->type == MAP_PRIVATE) { 5906 struct anon *ap = NULL; 5907 page_t *pp; 5908 u_offset_t offset, off; 5909 struct anon_map *amp; 5910 ulong_t anon_idx = 0; 5911 5912 /* 5913 * A vpage structure exists or else the change does not 5914 * involve the entire segment. Establish a vpage structure 5915 * if none is there. Then, for each page in the range, 5916 * adjust its individual permissions. Note that write- 5917 * enabling a MAP_PRIVATE page can affect the claims for 5918 * locked down memory. Overcommitting memory terminates 5919 * the operation. 5920 */ 5921 segvn_vpage(seg); 5922 if (svd->vpage == NULL) { 5923 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5924 return (ENOMEM); 5925 } 5926 svd->pageprot = 1; 5927 if ((amp = svd->amp) != NULL) { 5928 anon_idx = svd->anon_index + seg_page(seg, addr); 5929 ASSERT(seg->s_szc == 0 || 5930 IS_P2ALIGNED(anon_idx, pgcnt)); 5931 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5932 } 5933 5934 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5935 evp = &svd->vpage[seg_page(seg, addr + len)]; 5936 5937 /* 5938 * See Statement at the beginning of segvn_lockop regarding 5939 * the way cowcnts and lckcnts are handled. 
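 *
 * In short (an illustrative reduction of the per-page work below):
 * for a locked page whose protection change crosses PROT_WRITE,
 *
 *	if (prot & PROT_WRITE)
 *		ok = page_addclaim(pp);
 *	else
 *		ok = page_subclaim(pp);
 *
 * and if the claim cannot be adjusted the loop ends early and the
 * caller eventually sees IE_NOMEM.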
5940 */ 5941 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5942 5943 if (seg->s_szc != 0) { 5944 if (amp != NULL) { 5945 anon_array_enter(amp, anon_idx, 5946 &cookie); 5947 } 5948 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5949 !segvn_claim_pages(seg, svp, offset, 5950 anon_idx, prot)) { 5951 if (amp != NULL) { 5952 anon_array_exit(&cookie); 5953 } 5954 break; 5955 } 5956 if (amp != NULL) { 5957 anon_array_exit(&cookie); 5958 } 5959 anon_idx++; 5960 } else { 5961 if (amp != NULL) { 5962 anon_array_enter(amp, anon_idx, 5963 &cookie); 5964 ap = anon_get_ptr(amp->ahp, anon_idx++); 5965 } 5966 5967 if (VPP_ISPPLOCK(svp) && 5968 VPP_PROT(svp) != prot) { 5969 5970 if (amp == NULL || ap == NULL) { 5971 vp = svd->vp; 5972 off = offset; 5973 } else 5974 swap_xlate(ap, &vp, &off); 5975 if (amp != NULL) 5976 anon_array_exit(&cookie); 5977 5978 if ((pp = page_lookup(vp, off, 5979 SE_SHARED)) == NULL) { 5980 panic("segvn_setprot: no page"); 5981 /*NOTREACHED*/ 5982 } 5983 ASSERT(seg->s_szc == 0); 5984 if ((VPP_PROT(svp) ^ prot) & 5985 PROT_WRITE) { 5986 if (prot & PROT_WRITE) { 5987 if (!page_addclaim( 5988 pp)) { 5989 page_unlock(pp); 5990 break; 5991 } 5992 } else { 5993 if (!page_subclaim( 5994 pp)) { 5995 page_unlock(pp); 5996 break; 5997 } 5998 } 5999 } 6000 page_unlock(pp); 6001 } else if (amp != NULL) 6002 anon_array_exit(&cookie); 6003 } 6004 VPP_SETPROT(svp, prot); 6005 offset += PAGESIZE; 6006 } 6007 if (amp != NULL) 6008 ANON_LOCK_EXIT(&->a_rwlock); 6009 6010 /* 6011 * Did we terminate prematurely? If so, simply unload 6012 * the translations to the things we've updated so far. 6013 */ 6014 if (svp != evp) { 6015 if (unload_done) { 6016 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6017 return (IE_NOMEM); 6018 } 6019 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 6020 PAGESIZE; 6021 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 6022 if (len != 0) 6023 hat_unload(seg->s_as->a_hat, addr, 6024 len, HAT_UNLOAD); 6025 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6026 return (IE_NOMEM); 6027 } 6028 } else { 6029 segvn_vpage(seg); 6030 if (svd->vpage == NULL) { 6031 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6032 return (ENOMEM); 6033 } 6034 svd->pageprot = 1; 6035 evp = &svd->vpage[seg_page(seg, addr + len)]; 6036 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 6037 VPP_SETPROT(svp, prot); 6038 } 6039 } 6040 6041 if (unload_done) { 6042 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6043 return (0); 6044 } 6045 6046 if (((prot & PROT_WRITE) != 0 && 6047 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 6048 (prot & ~PROT_USER) == PROT_NONE) { 6049 /* 6050 * Either private or shared data with write access (in 6051 * which case we need to throw out all former translations 6052 * so that we get the right translations set up on fault 6053 * and we don't allow write access to any copy-on-write pages 6054 * that might be around or to prevent write access to pages 6055 * representing holes in a file), or we don't have permission 6056 * to access the memory at all (in which case we have to 6057 * unload any current translations that might exist). 6058 */ 6059 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 6060 } else { 6061 /* 6062 * A shared mapping or a private mapping in which write 6063 * protection is going to be denied - just change all the 6064 * protections over the range of addresses in question. 6065 * segvn does not support any other attributes other 6066 * than prot so we can use hat_chgattr. 
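 * hat_chgattr() only updates translations that already exist;
 * pages with no current mapping simply pick up the new
 * protections on their next fault.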
6067 */ 6068 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6069 } 6070 6071 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6072 6073 return (0); 6074 } 6075 6076 /* 6077 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 6078 * to determine if the seg is capable of mapping the requested szc. 6079 */ 6080 static int 6081 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6082 { 6083 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6084 struct segvn_data *nsvd; 6085 struct anon_map *amp = svd->amp; 6086 struct seg *nseg; 6087 caddr_t eaddr = addr + len, a; 6088 size_t pgsz = page_get_pagesize(szc); 6089 pgcnt_t pgcnt = page_get_pagecnt(szc); 6090 int err; 6091 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6092 6093 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); 6094 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6095 6096 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6097 return (0); 6098 } 6099 6100 /* 6101 * addr should always be pgsz aligned but eaddr may be misaligned if 6102 * it's at the end of the segment. 6103 * 6104 * XXX we should assert this condition since as_setpagesize() logic 6105 * guarantees it. 6106 */ 6107 if (!IS_P2ALIGNED(addr, pgsz) || 6108 (!IS_P2ALIGNED(eaddr, pgsz) && 6109 eaddr != seg->s_base + seg->s_size)) { 6110 6111 segvn_setpgsz_align_err++; 6112 return (EINVAL); 6113 } 6114 6115 if (amp != NULL && svd->type == MAP_SHARED) { 6116 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6117 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6118 6119 segvn_setpgsz_anon_align_err++; 6120 return (EINVAL); 6121 } 6122 } 6123 6124 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6125 szc > segvn_maxpgszc) { 6126 return (EINVAL); 6127 } 6128 6129 /* paranoid check */ 6130 if (svd->vp != NULL && 6131 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6132 return (EINVAL); 6133 } 6134 6135 if (seg->s_szc == 0 && svd->vp != NULL && 6136 map_addr_vacalign_check(addr, off)) { 6137 return (EINVAL); 6138 } 6139 6140 /* 6141 * Check that protections are the same within new page 6142 * size boundaries. 6143 */ 6144 if (svd->pageprot) { 6145 for (a = addr; a < eaddr; a += pgsz) { 6146 if ((a + pgsz) > eaddr) { 6147 if (!sameprot(seg, a, eaddr - a)) { 6148 return (EINVAL); 6149 } 6150 } else { 6151 if (!sameprot(seg, a, pgsz)) { 6152 return (EINVAL); 6153 } 6154 } 6155 } 6156 } 6157 6158 /* 6159 * Since we are changing page size we first have to flush 6160 * the cache. This makes sure all the pagelock calls have 6161 * to recheck protections. 6162 */ 6163 if (svd->softlockcnt > 0) { 6164 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6165 6166 /* 6167 * If this is shared segment non 0 softlockcnt 6168 * means locked pages are still in use. 6169 */ 6170 if (svd->type == MAP_SHARED) { 6171 return (EAGAIN); 6172 } 6173 6174 /* 6175 * Since we do have the segvn writers lock nobody can fill 6176 * the cache with entries belonging to this seg during 6177 * the purge. The flush either succeeds or we still have 6178 * pending I/Os. 
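 * If softlocks are still outstanding after the purge we fail
 * with EAGAIN below rather than wait for the I/O to drain.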
6179 */ 6180 segvn_purge(seg); 6181 if (svd->softlockcnt > 0) { 6182 return (EAGAIN); 6183 } 6184 } 6185 6186 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6187 ASSERT(svd->amp == NULL); 6188 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6189 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6190 HAT_REGION_TEXT); 6191 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6192 } else if (svd->tr_state == SEGVN_TR_INIT) { 6193 svd->tr_state = SEGVN_TR_OFF; 6194 } else if (svd->tr_state == SEGVN_TR_ON) { 6195 ASSERT(svd->amp != NULL); 6196 segvn_textunrepl(seg, 1); 6197 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6198 amp = NULL; 6199 } 6200 6201 /* 6202 * Operation for sub range of existing segment. 6203 */ 6204 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6205 if (szc < seg->s_szc) { 6206 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6207 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6208 if (err == 0) { 6209 return (IE_RETRY); 6210 } 6211 if (err == ENOMEM) { 6212 return (IE_NOMEM); 6213 } 6214 return (err); 6215 } 6216 if (addr != seg->s_base) { 6217 nseg = segvn_split_seg(seg, addr); 6218 if (eaddr != (nseg->s_base + nseg->s_size)) { 6219 /* eaddr is szc aligned */ 6220 (void) segvn_split_seg(nseg, eaddr); 6221 } 6222 return (IE_RETRY); 6223 } 6224 if (eaddr != (seg->s_base + seg->s_size)) { 6225 /* eaddr is szc aligned */ 6226 (void) segvn_split_seg(seg, eaddr); 6227 } 6228 return (IE_RETRY); 6229 } 6230 6231 /* 6232 * Break any low level sharing and reset seg->s_szc to 0. 6233 */ 6234 if ((err = segvn_clrszc(seg)) != 0) { 6235 if (err == ENOMEM) { 6236 err = IE_NOMEM; 6237 } 6238 return (err); 6239 } 6240 ASSERT(seg->s_szc == 0); 6241 6242 /* 6243 * If the end of the current segment is not pgsz aligned 6244 * then attempt to concatenate with the next segment. 6245 */ 6246 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6247 nseg = AS_SEGNEXT(seg->s_as, seg); 6248 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6249 return (ENOMEM); 6250 } 6251 if (nseg->s_ops != &segvn_ops) { 6252 return (EINVAL); 6253 } 6254 nsvd = (struct segvn_data *)nseg->s_data; 6255 if (nsvd->softlockcnt > 0) { 6256 /* 6257 * If this is shared segment non 0 softlockcnt 6258 * means locked pages are still in use. 6259 */ 6260 if (nsvd->type == MAP_SHARED) { 6261 return (EAGAIN); 6262 } 6263 segvn_purge(nseg); 6264 if (nsvd->softlockcnt > 0) { 6265 return (EAGAIN); 6266 } 6267 } 6268 err = segvn_clrszc(nseg); 6269 if (err == ENOMEM) { 6270 err = IE_NOMEM; 6271 } 6272 if (err != 0) { 6273 return (err); 6274 } 6275 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6276 err = segvn_concat(seg, nseg, 1); 6277 if (err == -1) { 6278 return (EINVAL); 6279 } 6280 if (err == -2) { 6281 return (IE_NOMEM); 6282 } 6283 return (IE_RETRY); 6284 } 6285 6286 /* 6287 * May need to re-align anon array to 6288 * new szc. 
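 * The anon slots backing a large page must start at an index
 * aligned to the page count for the new size; if the existing
 * anon_index is not aligned, the slots are copied into a fresh
 * anon_hdr below so that anon_index can be reset to 0.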
6289 */ 6290 if (amp != NULL) { 6291 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6292 struct anon_hdr *nahp; 6293 6294 ASSERT(svd->type == MAP_PRIVATE); 6295 6296 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6297 ASSERT(amp->refcnt == 1); 6298 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6299 if (nahp == NULL) { 6300 ANON_LOCK_EXIT(&->a_rwlock); 6301 return (IE_NOMEM); 6302 } 6303 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6304 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6305 anon_release(nahp, btop(amp->size)); 6306 ANON_LOCK_EXIT(&->a_rwlock); 6307 return (IE_NOMEM); 6308 } 6309 anon_release(amp->ahp, btop(amp->size)); 6310 amp->ahp = nahp; 6311 svd->anon_index = 0; 6312 ANON_LOCK_EXIT(&->a_rwlock); 6313 } 6314 } 6315 if (svd->vp != NULL && szc != 0) { 6316 struct vattr va; 6317 u_offset_t eoffpage = svd->offset; 6318 va.va_mask = AT_SIZE; 6319 eoffpage += seg->s_size; 6320 eoffpage = btopr(eoffpage); 6321 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6322 segvn_setpgsz_getattr_err++; 6323 return (EINVAL); 6324 } 6325 if (btopr(va.va_size) < eoffpage) { 6326 segvn_setpgsz_eof_err++; 6327 return (EINVAL); 6328 } 6329 if (amp != NULL) { 6330 /* 6331 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6332 * don't take anon map lock here to avoid holding it 6333 * across VOP_GETPAGE() calls that may call back into 6334 * segvn for klsutering checks. We don't really need 6335 * anon map lock here since it's a private segment and 6336 * we hold as level lock as writers. 6337 */ 6338 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6339 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6340 seg->s_size, szc, svd->prot, svd->vpage, 6341 svd->cred)) != 0) { 6342 return (EINVAL); 6343 } 6344 } 6345 segvn_setvnode_mpss(svd->vp); 6346 } 6347 6348 if (amp != NULL) { 6349 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6350 if (svd->type == MAP_PRIVATE) { 6351 amp->a_szc = szc; 6352 } else if (szc > amp->a_szc) { 6353 amp->a_szc = szc; 6354 } 6355 ANON_LOCK_EXIT(&->a_rwlock); 6356 } 6357 6358 seg->s_szc = szc; 6359 6360 return (0); 6361 } 6362 6363 static int 6364 segvn_clrszc(struct seg *seg) 6365 { 6366 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6367 struct anon_map *amp = svd->amp; 6368 size_t pgsz; 6369 pgcnt_t pages; 6370 int err = 0; 6371 caddr_t a = seg->s_base; 6372 caddr_t ea = a + seg->s_size; 6373 ulong_t an_idx = svd->anon_index; 6374 vnode_t *vp = svd->vp; 6375 struct vpage *vpage = svd->vpage; 6376 page_t *anon_pl[1 + 1], *pp; 6377 struct anon *ap, *oldap; 6378 uint_t prot = svd->prot, vpprot; 6379 int pageflag = 0; 6380 6381 ASSERT(AS_WRITE_HELD(seg->s_as) || 6382 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6383 ASSERT(svd->softlockcnt == 0); 6384 6385 if (vp == NULL && amp == NULL) { 6386 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6387 seg->s_szc = 0; 6388 return (0); 6389 } 6390 6391 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6392 ASSERT(svd->amp == NULL); 6393 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6394 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6395 HAT_REGION_TEXT); 6396 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6397 } else if (svd->tr_state == SEGVN_TR_ON) { 6398 ASSERT(svd->amp != NULL); 6399 segvn_textunrepl(seg, 1); 6400 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6401 amp = NULL; 6402 } else { 6403 if (svd->tr_state != SEGVN_TR_OFF) { 6404 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6405 svd->tr_state = SEGVN_TR_OFF; 6406 } 6407 6408 /* 6409 * do HAT_UNLOAD_UNMAP since we are changing the pagesize. 
6410 * unload argument is 0 when we are freeing the segment
6411 * and unload was already done.
6412 */
6413 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6414 HAT_UNLOAD_UNMAP);
6415 }
6416
6417 if (amp == NULL || svd->type == MAP_SHARED) {
6418 seg->s_szc = 0;
6419 return (0);
6420 }
6421
6422 pgsz = page_get_pagesize(seg->s_szc);
6423 pages = btop(pgsz);
6424
6425 /*
6426 * XXX anon rwlock is not really needed because this is a
6427 * private segment and we are writers.
6428 */
6429 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6430
6431 for (; a < ea; a += pgsz, an_idx += pages) {
6432 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6433 ASSERT(vpage != NULL || svd->pageprot == 0);
6434 if (vpage != NULL) {
6435 ASSERT(sameprot(seg, a, pgsz));
6436 prot = VPP_PROT(vpage);
6437 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6438 }
6439 if (seg->s_szc != 0) {
6440 ASSERT(vp == NULL || anon_pages(amp->ahp,
6441 an_idx, pages) == pages);
6442 if ((err = anon_map_demotepages(amp, an_idx,
6443 seg, a, prot, vpage, svd->cred)) != 0) {
6444 goto out;
6445 }
6446 } else {
6447 if (oldap->an_refcnt == 1) {
6448 continue;
6449 }
6450 if ((err = anon_getpage(&oldap, &vpprot,
6451 anon_pl, PAGESIZE, seg, a, S_READ,
6452 svd->cred))) {
6453 goto out;
6454 }
6455 if ((pp = anon_private(&ap, seg, a, prot,
6456 anon_pl[0], pageflag, svd->cred)) == NULL) {
6457 err = ENOMEM;
6458 goto out;
6459 }
6460 anon_decref(oldap);
6461 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6462 ANON_SLEEP);
6463 page_unlock(pp);
6464 }
6465 }
6466 vpage = (vpage == NULL) ? NULL : vpage + pages;
6467 }
6468
6469 amp->a_szc = 0;
6470 seg->s_szc = 0;
6471 out:
6472 ANON_LOCK_EXIT(&amp->a_rwlock);
6473 return (err);
6474 }
6475
6476 static int
6477 segvn_claim_pages(
6478 struct seg *seg,
6479 struct vpage *svp,
6480 u_offset_t off,
6481 ulong_t anon_idx,
6482 uint_t prot)
6483 {
6484 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6485 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6486 page_t **ppa;
6487 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6488 struct anon_map *amp = svd->amp;
6489 struct vpage *evp = svp + pgcnt;
6490 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6491 + seg->s_base;
6492 struct anon *ap;
6493 struct vnode *vp = svd->vp;
6494 page_t *pp;
6495 pgcnt_t pg_idx, i;
6496 int err = 0;
6497 anoff_t aoff;
6498 int anon = (amp != NULL) ?
1 : 0; 6499 6500 ASSERT(svd->type == MAP_PRIVATE); 6501 ASSERT(svd->vpage != NULL); 6502 ASSERT(seg->s_szc != 0); 6503 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6504 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6505 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6506 6507 if (VPP_PROT(svp) == prot) 6508 return (1); 6509 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6510 return (1); 6511 6512 ppa = kmem_alloc(ppasize, KM_SLEEP); 6513 if (anon && vp != NULL) { 6514 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6515 anon = 0; 6516 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6517 } 6518 ASSERT(!anon || 6519 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6520 } 6521 6522 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6523 if (!VPP_ISPPLOCK(svp)) 6524 continue; 6525 if (anon) { 6526 ap = anon_get_ptr(amp->ahp, anon_idx); 6527 if (ap == NULL) { 6528 panic("segvn_claim_pages: no anon slot"); 6529 } 6530 swap_xlate(ap, &vp, &aoff); 6531 off = (u_offset_t)aoff; 6532 } 6533 ASSERT(vp != NULL); 6534 if ((pp = page_lookup(vp, 6535 (u_offset_t)off, SE_SHARED)) == NULL) { 6536 panic("segvn_claim_pages: no page"); 6537 } 6538 ppa[pg_idx++] = pp; 6539 off += PAGESIZE; 6540 } 6541 6542 if (ppa[0] == NULL) { 6543 kmem_free(ppa, ppasize); 6544 return (1); 6545 } 6546 6547 ASSERT(pg_idx <= pgcnt); 6548 ppa[pg_idx] = NULL; 6549 6550 6551 /* Find each large page within ppa, and adjust its claim */ 6552 6553 /* Does ppa cover a single large page? */ 6554 if (ppa[0]->p_szc == seg->s_szc) { 6555 if (prot & PROT_WRITE) 6556 err = page_addclaim_pages(ppa); 6557 else 6558 err = page_subclaim_pages(ppa); 6559 } else { 6560 for (i = 0; ppa[i]; i += pgcnt) { 6561 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); 6562 if (prot & PROT_WRITE) 6563 err = page_addclaim_pages(&ppa[i]); 6564 else 6565 err = page_subclaim_pages(&ppa[i]); 6566 if (err == 0) 6567 break; 6568 } 6569 } 6570 6571 for (i = 0; i < pg_idx; i++) { 6572 ASSERT(ppa[i] != NULL); 6573 page_unlock(ppa[i]); 6574 } 6575 6576 kmem_free(ppa, ppasize); 6577 return (err); 6578 } 6579 6580 /* 6581 * Returns right (upper address) segment if split occurred. 6582 * If the address is equal to the beginning or end of its segment it returns 6583 * the current segment. 
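 * After a split at addr the original segment is trimmed to
 * [s_base, addr) and the new segment covers [addr, old end),
 * taking the corresponding vpage entries, anon slots and share
 * of the swap reservation with it.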
6584 */
6585 static struct seg *
6586 segvn_split_seg(struct seg *seg, caddr_t addr)
6587 {
6588 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6589 struct seg *nseg;
6590 size_t nsize;
6591 struct segvn_data *nsvd;
6592
6593 ASSERT(AS_WRITE_HELD(seg->s_as));
6594 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6595
6596 ASSERT(addr >= seg->s_base);
6597 ASSERT(addr <= seg->s_base + seg->s_size);
6598 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6599
6600 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6601 return (seg);
6602
6603 nsize = seg->s_base + seg->s_size - addr;
6604 seg->s_size = addr - seg->s_base;
6605 nseg = seg_alloc(seg->s_as, addr, nsize);
6606 ASSERT(nseg != NULL);
6607 nseg->s_ops = seg->s_ops;
6608 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6609 nseg->s_data = (void *)nsvd;
6610 nseg->s_szc = seg->s_szc;
6611 *nsvd = *svd;
6612 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6613 nsvd->seg = nseg;
6614 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6615
6616 if (nsvd->vp != NULL) {
6617 VN_HOLD(nsvd->vp);
6618 nsvd->offset = svd->offset +
6619 (uintptr_t)(nseg->s_base - seg->s_base);
6620 if (nsvd->type == MAP_SHARED)
6621 lgrp_shm_policy_init(NULL, nsvd->vp);
6622 } else {
6623 /*
6624 * The offset for an anonymous segment has no significance in
6625 * terms of an offset into a file. If we were to use the above
6626 * calculation instead, the structures read out of
6627 * /proc/<pid>/xmap would be more difficult to decipher since
6628 * it would be unclear whether two seemingly contiguous
6629 * prxmap_t structures represented different segments or a
6630 * single segment that had been split up into multiple prxmap_t
6631 * structures (e.g. if some part of the segment had not yet
6632 * been faulted in).
6633 */ 6634 nsvd->offset = 0; 6635 } 6636 6637 ASSERT(svd->softlockcnt == 0); 6638 ASSERT(svd->softlockcnt_sbase == 0); 6639 ASSERT(svd->softlockcnt_send == 0); 6640 crhold(svd->cred); 6641 6642 if (svd->vpage != NULL) { 6643 size_t bytes = vpgtob(seg_pages(seg)); 6644 size_t nbytes = vpgtob(seg_pages(nseg)); 6645 struct vpage *ovpage = svd->vpage; 6646 6647 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6648 bcopy(ovpage, svd->vpage, bytes); 6649 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6650 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6651 kmem_free(ovpage, bytes + nbytes); 6652 } 6653 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6654 struct anon_map *oamp = svd->amp, *namp; 6655 struct anon_hdr *nahp; 6656 6657 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6658 ASSERT(oamp->refcnt == 1); 6659 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6660 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6661 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6662 6663 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6664 namp->a_szc = nseg->s_szc; 6665 (void) anon_copy_ptr(oamp->ahp, 6666 svd->anon_index + btop(seg->s_size), 6667 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6668 anon_release(oamp->ahp, btop(oamp->size)); 6669 oamp->ahp = nahp; 6670 oamp->size = seg->s_size; 6671 svd->anon_index = 0; 6672 nsvd->amp = namp; 6673 nsvd->anon_index = 0; 6674 ANON_LOCK_EXIT(&oamp->a_rwlock); 6675 } else if (svd->amp != NULL) { 6676 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6677 ASSERT(svd->amp == nsvd->amp); 6678 ASSERT(seg->s_szc <= svd->amp->a_szc); 6679 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6680 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6681 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6682 svd->amp->refcnt++; 6683 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6684 } 6685 6686 /* 6687 * Split the amount of swap reserved. 6688 */ 6689 if (svd->swresv) { 6690 /* 6691 * For MAP_NORESERVE, only allocate swap reserve for pages 6692 * being used. Other segments get enough to cover whole 6693 * segment. 6694 */ 6695 if (svd->flags & MAP_NORESERVE) { 6696 size_t oswresv; 6697 6698 ASSERT(svd->amp); 6699 oswresv = svd->swresv; 6700 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6701 svd->anon_index, btop(seg->s_size))); 6702 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6703 nsvd->anon_index, btop(nseg->s_size))); 6704 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6705 } else { 6706 if (svd->pageswap) { 6707 svd->swresv = segvn_count_swap_by_vpages(seg); 6708 ASSERT(nsvd->swresv >= svd->swresv); 6709 nsvd->swresv -= svd->swresv; 6710 } else { 6711 ASSERT(svd->swresv == seg->s_size + 6712 nseg->s_size); 6713 svd->swresv = seg->s_size; 6714 nsvd->swresv = nseg->s_size; 6715 } 6716 } 6717 } 6718 6719 return (nseg); 6720 } 6721 6722 /* 6723 * called on memory operations (unmap, setprot, setpagesize) for a subset 6724 * of a large page segment to either demote the memory range (SDR_RANGE) 6725 * or the ends (SDR_END) by addr/len. 6726 * 6727 * returns 0 on success. returns errno, including ENOMEM, on failure. 
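 * For SDR_END only the large page(s) straddling addr and/or
 * addr + len are split off into their own segment(s) and
 * demoted; for SDR_RANGE the whole large-page-aligned region
 * around [addr, addr + len) is demoted.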
6728 */ 6729 static int 6730 segvn_demote_range( 6731 struct seg *seg, 6732 caddr_t addr, 6733 size_t len, 6734 int flag, 6735 uint_t szcvec) 6736 { 6737 caddr_t eaddr = addr + len; 6738 caddr_t lpgaddr, lpgeaddr; 6739 struct seg *nseg; 6740 struct seg *badseg1 = NULL; 6741 struct seg *badseg2 = NULL; 6742 size_t pgsz; 6743 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6744 int err; 6745 uint_t szc = seg->s_szc; 6746 uint_t tszcvec; 6747 6748 ASSERT(AS_WRITE_HELD(seg->s_as)); 6749 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6750 ASSERT(szc != 0); 6751 pgsz = page_get_pagesize(szc); 6752 ASSERT(seg->s_base != addr || seg->s_size != len); 6753 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6754 ASSERT(svd->softlockcnt == 0); 6755 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6756 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6757 6758 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6759 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6760 if (flag == SDR_RANGE) { 6761 /* demote entire range */ 6762 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6763 (void) segvn_split_seg(nseg, lpgeaddr); 6764 ASSERT(badseg1->s_base == lpgaddr); 6765 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6766 } else if (addr != lpgaddr) { 6767 ASSERT(flag == SDR_END); 6768 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6769 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6770 eaddr < lpgaddr + 2 * pgsz) { 6771 (void) segvn_split_seg(nseg, lpgeaddr); 6772 ASSERT(badseg1->s_base == lpgaddr); 6773 ASSERT(badseg1->s_size == 2 * pgsz); 6774 } else { 6775 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6776 ASSERT(badseg1->s_base == lpgaddr); 6777 ASSERT(badseg1->s_size == pgsz); 6778 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6779 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6780 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6781 badseg2 = nseg; 6782 (void) segvn_split_seg(nseg, lpgeaddr); 6783 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6784 ASSERT(badseg2->s_size == pgsz); 6785 } 6786 } 6787 } else { 6788 ASSERT(flag == SDR_END); 6789 ASSERT(eaddr < lpgeaddr); 6790 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6791 (void) segvn_split_seg(nseg, lpgeaddr); 6792 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6793 ASSERT(badseg1->s_size == pgsz); 6794 } 6795 6796 ASSERT(badseg1 != NULL); 6797 ASSERT(badseg1->s_szc == szc); 6798 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6799 badseg1->s_size == 2 * pgsz); 6800 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6801 ASSERT(badseg1->s_size == pgsz || 6802 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6803 if (err = segvn_clrszc(badseg1)) { 6804 return (err); 6805 } 6806 ASSERT(badseg1->s_szc == 0); 6807 6808 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6809 uint_t tszc = highbit(tszcvec) - 1; 6810 caddr_t ta = MAX(addr, badseg1->s_base); 6811 caddr_t te; 6812 size_t tpgsz = page_get_pagesize(tszc); 6813 6814 ASSERT(svd->type == MAP_SHARED); 6815 ASSERT(flag == SDR_END); 6816 ASSERT(tszc < szc && tszc > 0); 6817 6818 if (eaddr > badseg1->s_base + badseg1->s_size) { 6819 te = badseg1->s_base + badseg1->s_size; 6820 } else { 6821 te = eaddr; 6822 } 6823 6824 ASSERT(ta <= te); 6825 badseg1->s_szc = tszc; 6826 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6827 if (badseg2 != NULL) { 6828 err = segvn_demote_range(badseg1, ta, te - ta, 6829 SDR_END, tszcvec); 6830 if (err != 0) { 6831 return (err); 6832 } 6833 } else { 6834 return 
(segvn_demote_range(badseg1, ta, 6835 te - ta, SDR_END, tszcvec)); 6836 } 6837 } 6838 } 6839 6840 if (badseg2 == NULL) 6841 return (0); 6842 ASSERT(badseg2->s_szc == szc); 6843 ASSERT(badseg2->s_size == pgsz); 6844 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6845 if (err = segvn_clrszc(badseg2)) { 6846 return (err); 6847 } 6848 ASSERT(badseg2->s_szc == 0); 6849 6850 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6851 uint_t tszc = highbit(tszcvec) - 1; 6852 size_t tpgsz = page_get_pagesize(tszc); 6853 6854 ASSERT(svd->type == MAP_SHARED); 6855 ASSERT(flag == SDR_END); 6856 ASSERT(tszc < szc && tszc > 0); 6857 ASSERT(badseg2->s_base > addr); 6858 ASSERT(eaddr > badseg2->s_base); 6859 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6860 6861 badseg2->s_szc = tszc; 6862 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6863 return (segvn_demote_range(badseg2, badseg2->s_base, 6864 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6865 } 6866 } 6867 6868 return (0); 6869 } 6870 6871 static int 6872 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6873 { 6874 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6875 struct vpage *vp, *evp; 6876 6877 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 6878 6879 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6880 /* 6881 * If segment protection can be used, simply check against them. 6882 */ 6883 if (svd->pageprot == 0) { 6884 int err; 6885 6886 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6887 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6888 return (err); 6889 } 6890 6891 /* 6892 * Have to check down to the vpage level. 6893 */ 6894 evp = &svd->vpage[seg_page(seg, addr + len)]; 6895 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6896 if ((VPP_PROT(vp) & prot) != prot) { 6897 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6898 return (EACCES); 6899 } 6900 } 6901 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6902 return (0); 6903 } 6904 6905 static int 6906 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6907 { 6908 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6909 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6910 6911 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 6912 6913 if (pgno != 0) { 6914 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6915 if (svd->pageprot == 0) { 6916 do { 6917 protv[--pgno] = svd->prot; 6918 } while (pgno != 0); 6919 } else { 6920 size_t pgoff = seg_page(seg, addr); 6921 6922 do { 6923 pgno--; 6924 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6925 } while (pgno != 0); 6926 } 6927 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6928 } 6929 return (0); 6930 } 6931 6932 static u_offset_t 6933 segvn_getoffset(struct seg *seg, caddr_t addr) 6934 { 6935 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6936 6937 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 6938 6939 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6940 } 6941 6942 /*ARGSUSED*/ 6943 static int 6944 segvn_gettype(struct seg *seg, caddr_t addr) 6945 { 6946 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6947 6948 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 6949 6950 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6951 MAP_INITDATA))); 6952 } 6953 6954 /*ARGSUSED*/ 6955 static int 6956 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6957 { 6958 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6959 6960 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 6961 6962 *vpp = svd->vp; 6963 return (0); 6964 
} 6965 6966 /* 6967 * Check to see if it makes sense to do kluster/read ahead to 6968 * addr + delta relative to the mapping at addr. We assume here 6969 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6970 * 6971 * For segvn, we currently "approve" of the action if we are 6972 * still in the segment and it maps from the same vp/off, 6973 * or if the advice stored in segvn_data or vpages allows it. 6974 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6975 */ 6976 static int 6977 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6978 { 6979 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6980 struct anon *oap, *ap; 6981 ssize_t pd; 6982 size_t page; 6983 struct vnode *vp1, *vp2; 6984 u_offset_t off1, off2; 6985 struct anon_map *amp; 6986 6987 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 6988 ASSERT(AS_WRITE_HELD(seg->s_as) || 6989 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6990 6991 if (addr + delta < seg->s_base || 6992 addr + delta >= (seg->s_base + seg->s_size)) 6993 return (-1); /* exceeded segment bounds */ 6994 6995 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6996 page = seg_page(seg, addr); 6997 6998 /* 6999 * Check to see if either of the pages addr or addr + delta 7000 * have advice set that prevents klustering (if MADV_RANDOM advice 7001 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 7002 * is negative). 7003 */ 7004 if (svd->advice == MADV_RANDOM || 7005 svd->advice == MADV_SEQUENTIAL && delta < 0) 7006 return (-1); 7007 else if (svd->pageadvice && svd->vpage) { 7008 struct vpage *bvpp, *evpp; 7009 7010 bvpp = &svd->vpage[page]; 7011 evpp = &svd->vpage[page + pd]; 7012 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 7013 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 7014 return (-1); 7015 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 7016 VPP_ADVICE(evpp) == MADV_RANDOM) 7017 return (-1); 7018 } 7019 7020 if (svd->type == MAP_SHARED) 7021 return (0); /* shared mapping - all ok */ 7022 7023 if ((amp = svd->amp) == NULL) 7024 return (0); /* off original vnode */ 7025 7026 page += svd->anon_index; 7027 7028 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7029 7030 oap = anon_get_ptr(amp->ahp, page); 7031 ap = anon_get_ptr(amp->ahp, page + pd); 7032 7033 ANON_LOCK_EXIT(&->a_rwlock); 7034 7035 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 7036 return (-1); /* one with and one without an anon */ 7037 } 7038 7039 if (oap == NULL) { /* implies that ap == NULL */ 7040 return (0); /* off original vnode */ 7041 } 7042 7043 /* 7044 * Now we know we have two anon pointers - check to 7045 * see if they happen to be properly allocated. 7046 */ 7047 7048 /* 7049 * XXX We cheat here and don't lock the anon slots. We can't because 7050 * we may have been called from the anon layer which might already 7051 * have locked them. We are holding a refcnt on the slots so they 7052 * can't disappear. The worst that will happen is we'll get the wrong 7053 * names (vp, off) for the slots and make a poor klustering decision. 7054 */ 7055 swap_xlate(ap, &vp1, &off1); 7056 swap_xlate(oap, &vp2, &off2); 7057 7058 7059 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 7060 return (-1); 7061 return (0); 7062 } 7063 7064 /* 7065 * Swap the pages of seg out to secondary storage, returning the 7066 * number of bytes of storage freed. 7067 * 7068 * The basic idea is first to unload all translations and then to call 7069 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 7070 * swap device. 
Pages to which other segments have mappings will remain 7071 * mapped and won't be swapped. Our caller (as_swapout) has already 7072 * performed the unloading step. 7073 * 7074 * The value returned is intended to correlate well with the process's 7075 * memory requirements. However, there are some caveats: 7076 * 1) When given a shared segment as argument, this routine will 7077 * only succeed in swapping out pages for the last sharer of the 7078 * segment. (Previous callers will only have decremented mapping 7079 * reference counts.) 7080 * 2) We assume that the hat layer maintains a large enough translation 7081 * cache to capture process reference patterns. 7082 */ 7083 static size_t 7084 segvn_swapout(struct seg *seg) 7085 { 7086 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7087 struct anon_map *amp; 7088 pgcnt_t pgcnt = 0; 7089 pgcnt_t npages; 7090 pgcnt_t page; 7091 ulong_t anon_index; 7092 7093 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 7094 7095 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7096 /* 7097 * Find pages unmapped by our caller and force them 7098 * out to the virtual swap device. 7099 */ 7100 if ((amp = svd->amp) != NULL) 7101 anon_index = svd->anon_index; 7102 npages = seg->s_size >> PAGESHIFT; 7103 for (page = 0; page < npages; page++) { 7104 page_t *pp; 7105 struct anon *ap; 7106 struct vnode *vp; 7107 u_offset_t off; 7108 anon_sync_obj_t cookie; 7109 7110 /* 7111 * Obtain <vp, off> pair for the page, then look it up. 7112 * 7113 * Note that this code is willing to consider regular 7114 * pages as well as anon pages. Is this appropriate here? 7115 */ 7116 ap = NULL; 7117 if (amp != NULL) { 7118 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7119 if (anon_array_try_enter(amp, anon_index + page, 7120 &cookie)) { 7121 ANON_LOCK_EXIT(&->a_rwlock); 7122 continue; 7123 } 7124 ap = anon_get_ptr(amp->ahp, anon_index + page); 7125 if (ap != NULL) { 7126 swap_xlate(ap, &vp, &off); 7127 } else { 7128 vp = svd->vp; 7129 off = svd->offset + ptob(page); 7130 } 7131 anon_array_exit(&cookie); 7132 ANON_LOCK_EXIT(&->a_rwlock); 7133 } else { 7134 vp = svd->vp; 7135 off = svd->offset + ptob(page); 7136 } 7137 if (vp == NULL) { /* untouched zfod page */ 7138 ASSERT(ap == NULL); 7139 continue; 7140 } 7141 7142 pp = page_lookup_nowait(vp, off, SE_SHARED); 7143 if (pp == NULL) 7144 continue; 7145 7146 7147 /* 7148 * Examine the page to see whether it can be tossed out, 7149 * keeping track of how many we've found. 7150 */ 7151 if (!page_tryupgrade(pp)) { 7152 /* 7153 * If the page has an i/o lock and no mappings, 7154 * it's very likely that the page is being 7155 * written out as a result of klustering. 7156 * Assume this is so and take credit for it here. 7157 */ 7158 if (!page_io_trylock(pp)) { 7159 if (!hat_page_is_mapped(pp)) 7160 pgcnt++; 7161 } else { 7162 page_io_unlock(pp); 7163 } 7164 page_unlock(pp); 7165 continue; 7166 } 7167 ASSERT(!page_iolock_assert(pp)); 7168 7169 7170 /* 7171 * Skip if page is locked or has mappings. 7172 * We don't need the page_struct_lock to look at lckcnt 7173 * and cowcnt because the page is exclusive locked. 7174 */ 7175 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7176 hat_page_is_mapped(pp)) { 7177 page_unlock(pp); 7178 continue; 7179 } 7180 7181 /* 7182 * dispose skips large pages so try to demote first. 7183 */ 7184 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7185 page_unlock(pp); 7186 /* 7187 * XXX should skip the remaining page_t's of this 7188 * large page. 
7189 */ 7190 continue; 7191 } 7192 7193 ASSERT(pp->p_szc == 0); 7194 7195 /* 7196 * No longer mapped -- we can toss it out. How 7197 * we do so depends on whether or not it's dirty. 7198 */ 7199 if (hat_ismod(pp) && pp->p_vnode) { 7200 /* 7201 * We must clean the page before it can be 7202 * freed. Setting B_FREE will cause pvn_done 7203 * to free the page when the i/o completes. 7204 * XXX: This also causes it to be accounted 7205 * as a pageout instead of a swap: need 7206 * B_SWAPOUT bit to use instead of B_FREE. 7207 * 7208 * Hold the vnode before releasing the page lock 7209 * to prevent it from being freed and re-used by 7210 * some other thread. 7211 */ 7212 VN_HOLD(vp); 7213 page_unlock(pp); 7214 7215 /* 7216 * Queue all i/o requests for the pageout thread 7217 * to avoid saturating the pageout devices. 7218 */ 7219 if (!queue_io_request(vp, off)) 7220 VN_RELE(vp); 7221 } else { 7222 /* 7223 * The page was clean, free it. 7224 * 7225 * XXX: Can we ever encounter modified pages 7226 * with no associated vnode here? 7227 */ 7228 ASSERT(pp->p_vnode != NULL); 7229 /*LINTED: constant in conditional context*/ 7230 VN_DISPOSE(pp, B_FREE, 0, kcred); 7231 } 7232 7233 /* 7234 * Credit now even if i/o is in progress. 7235 */ 7236 pgcnt++; 7237 } 7238 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7239 7240 /* 7241 * Wakeup pageout to initiate i/o on all queued requests. 7242 */ 7243 cv_signal_pageout(); 7244 return (ptob(pgcnt)); 7245 } 7246 7247 /* 7248 * Synchronize primary storage cache with real object in virtual memory. 7249 * 7250 * XXX - Anonymous pages should not be sync'ed out at all. 7251 */ 7252 static int 7253 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7254 { 7255 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7256 struct vpage *vpp; 7257 page_t *pp; 7258 u_offset_t offset; 7259 struct vnode *vp; 7260 u_offset_t off; 7261 caddr_t eaddr; 7262 int bflags; 7263 int err = 0; 7264 int segtype; 7265 int pageprot = 0; 7266 int prot; 7267 ulong_t anon_index = 0; 7268 struct anon_map *amp; 7269 struct anon *ap; 7270 anon_sync_obj_t cookie; 7271 7272 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 7273 7274 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7275 7276 if (svd->softlockcnt > 0) { 7277 /* 7278 * If this is shared segment non 0 softlockcnt 7279 * means locked pages are still in use. 7280 */ 7281 if (svd->type == MAP_SHARED) { 7282 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7283 return (EAGAIN); 7284 } 7285 7286 /* 7287 * flush all pages from seg cache 7288 * otherwise we may deadlock in swap_putpage 7289 * for B_INVAL page (4175402). 7290 * 7291 * Even if we grab segvn WRITER's lock 7292 * here, there might be another thread which could've 7293 * successfully performed lookup/insert just before 7294 * we acquired the lock here. So, grabbing either 7295 * lock here is of not much use. Until we devise 7296 * a strategy at upper layers to solve the 7297 * synchronization issues completely, we expect 7298 * applications to handle this appropriately. 7299 */ 7300 segvn_purge(seg); 7301 if (svd->softlockcnt > 0) { 7302 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7303 return (EAGAIN); 7304 } 7305 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7306 svd->amp->a_softlockcnt > 0) { 7307 /* 7308 * Try to purge this amp's entries from pcache. It will 7309 * succeed only if other segments that share the amp have no 7310 * outstanding softlock's. 
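 * If outstanding softlocks remain after the purge the sync
 * fails with EAGAIN below.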
7311 */ 7312 segvn_purge(seg); 7313 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7314 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7315 return (EAGAIN); 7316 } 7317 } 7318 7319 vpp = svd->vpage; 7320 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7321 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7322 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7323 7324 if (attr) { 7325 pageprot = attr & ~(SHARED|PRIVATE); 7326 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7327 7328 /* 7329 * We are done if the segment types don't match 7330 * or if we have segment level protections and 7331 * they don't match. 7332 */ 7333 if (svd->type != segtype) { 7334 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7335 return (0); 7336 } 7337 if (vpp == NULL) { 7338 if (svd->prot != pageprot) { 7339 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7340 return (0); 7341 } 7342 prot = svd->prot; 7343 } else 7344 vpp = &svd->vpage[seg_page(seg, addr)]; 7345 7346 } else if (svd->vp && svd->amp == NULL && 7347 (flags & MS_INVALIDATE) == 0) { 7348 7349 /* 7350 * No attributes, no anonymous pages and MS_INVALIDATE flag 7351 * is not on, just use one big request. 7352 */ 7353 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7354 bflags, svd->cred, NULL); 7355 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7356 return (err); 7357 } 7358 7359 if ((amp = svd->amp) != NULL) 7360 anon_index = svd->anon_index + seg_page(seg, addr); 7361 7362 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7363 ap = NULL; 7364 if (amp != NULL) { 7365 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7366 anon_array_enter(amp, anon_index, &cookie); 7367 ap = anon_get_ptr(amp->ahp, anon_index++); 7368 if (ap != NULL) { 7369 swap_xlate(ap, &vp, &off); 7370 } else { 7371 vp = svd->vp; 7372 off = offset; 7373 } 7374 anon_array_exit(&cookie); 7375 ANON_LOCK_EXIT(&->a_rwlock); 7376 } else { 7377 vp = svd->vp; 7378 off = offset; 7379 } 7380 offset += PAGESIZE; 7381 7382 if (vp == NULL) /* untouched zfod page */ 7383 continue; 7384 7385 if (attr) { 7386 if (vpp) { 7387 prot = VPP_PROT(vpp); 7388 vpp++; 7389 } 7390 if (prot != pageprot) { 7391 continue; 7392 } 7393 } 7394 7395 /* 7396 * See if any of these pages are locked -- if so, then we 7397 * will have to truncate an invalidate request at the first 7398 * locked one. We don't need the page_struct_lock to test 7399 * as this is only advisory; even if we acquire it someone 7400 * might race in and lock the page after we unlock and before 7401 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7402 */ 7403 if (flags & MS_INVALIDATE) { 7404 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7405 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7406 page_unlock(pp); 7407 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7408 return (EBUSY); 7409 } 7410 if (ap != NULL && pp->p_szc != 0 && 7411 page_tryupgrade(pp)) { 7412 if (pp->p_lckcnt == 0 && 7413 pp->p_cowcnt == 0) { 7414 /* 7415 * swapfs VN_DISPOSE() won't 7416 * invalidate large pages. 7417 * Attempt to demote. 7418 * XXX can't help it if it 7419 * fails. But for swapfs 7420 * pages it is no big deal. 7421 */ 7422 (void) page_try_demote_pages( 7423 pp); 7424 } 7425 } 7426 page_unlock(pp); 7427 } 7428 } else if (svd->type == MAP_SHARED && amp != NULL) { 7429 /* 7430 * Avoid writing out to disk ISM's large pages 7431 * because segspt_free_pages() relies on NULL an_pvp 7432 * of anon slots of such pages. 
7433 */ 7434 7435 ASSERT(svd->vp == NULL); 7436 /* 7437 * swapfs uses page_lookup_nowait if not freeing or 7438 * invalidating and skips a page if 7439 * page_lookup_nowait returns NULL. 7440 */ 7441 pp = page_lookup_nowait(vp, off, SE_SHARED); 7442 if (pp == NULL) { 7443 continue; 7444 } 7445 if (pp->p_szc != 0) { 7446 page_unlock(pp); 7447 continue; 7448 } 7449 7450 /* 7451 * Note ISM pages are created large so (vp, off)'s 7452 * page cannot suddenly become large after we unlock 7453 * pp. 7454 */ 7455 page_unlock(pp); 7456 } 7457 /* 7458 * XXX - Should ultimately try to kluster 7459 * calls to VOP_PUTPAGE() for performance. 7460 */ 7461 VN_HOLD(vp); 7462 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7463 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7464 svd->cred, NULL); 7465 7466 VN_RELE(vp); 7467 if (err) 7468 break; 7469 } 7470 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7471 return (err); 7472 } 7473 7474 /* 7475 * Determine if we have data corresponding to pages in the 7476 * primary storage virtual memory cache (i.e., "in core"). 7477 */ 7478 static size_t 7479 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7480 { 7481 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7482 struct vnode *vp, *avp; 7483 u_offset_t offset, aoffset; 7484 size_t p, ep; 7485 int ret; 7486 struct vpage *vpp; 7487 page_t *pp; 7488 uint_t start; 7489 struct anon_map *amp; /* XXX - for locknest */ 7490 struct anon *ap; 7491 uint_t attr; 7492 anon_sync_obj_t cookie; 7493 7494 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 7495 7496 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7497 if (svd->amp == NULL && svd->vp == NULL) { 7498 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7499 bzero(vec, btopr(len)); 7500 return (len); /* no anonymous pages created yet */ 7501 } 7502 7503 p = seg_page(seg, addr); 7504 ep = seg_page(seg, addr + len); 7505 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7506 7507 amp = svd->amp; 7508 for (; p < ep; p++, addr += PAGESIZE) { 7509 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7510 ret = start; 7511 ap = NULL; 7512 avp = NULL; 7513 /* Grab the vnode/offset for the anon slot */ 7514 if (amp != NULL) { 7515 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7516 anon_array_enter(amp, svd->anon_index + p, &cookie); 7517 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7518 if (ap != NULL) { 7519 swap_xlate(ap, &avp, &aoffset); 7520 } 7521 anon_array_exit(&cookie); 7522 ANON_LOCK_EXIT(&->a_rwlock); 7523 } 7524 if ((avp != NULL) && page_exists(avp, aoffset)) { 7525 /* A page exists for the anon slot */ 7526 ret |= SEG_PAGE_INCORE; 7527 7528 /* 7529 * If page is mapped and writable 7530 */ 7531 attr = (uint_t)0; 7532 if ((hat_getattr(seg->s_as->a_hat, addr, 7533 &attr) != -1) && (attr & PROT_WRITE)) { 7534 ret |= SEG_PAGE_ANON; 7535 } 7536 /* 7537 * Don't get page_struct lock for lckcnt and cowcnt, 7538 * since this is purely advisory. 7539 */ 7540 if ((pp = page_lookup_nowait(avp, aoffset, 7541 SE_SHARED)) != NULL) { 7542 if (pp->p_lckcnt) 7543 ret |= SEG_PAGE_SOFTLOCK; 7544 if (pp->p_cowcnt) 7545 ret |= SEG_PAGE_HASCOW; 7546 page_unlock(pp); 7547 } 7548 } 7549 7550 /* Gather vnode statistics */ 7551 vp = svd->vp; 7552 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7553 7554 if (vp != NULL) { 7555 /* 7556 * Try to obtain a "shared" lock on the page 7557 * without blocking. If this fails, determine 7558 * if the page is in memory. 
7559 */ 7560 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7561 if ((pp == NULL) && (page_exists(vp, offset))) { 7562 /* Page is incore, and is named */ 7563 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7564 } 7565 /* 7566 * Don't get page_struct lock for lckcnt and cowcnt, 7567 * since this is purely advisory. 7568 */ 7569 if (pp != NULL) { 7570 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7571 if (pp->p_lckcnt) 7572 ret |= SEG_PAGE_SOFTLOCK; 7573 if (pp->p_cowcnt) 7574 ret |= SEG_PAGE_HASCOW; 7575 page_unlock(pp); 7576 } 7577 } 7578 7579 /* Gather virtual page information */ 7580 if (vpp) { 7581 if (VPP_ISPPLOCK(vpp)) 7582 ret |= SEG_PAGE_LOCKED; 7583 vpp++; 7584 } 7585 7586 *vec++ = (char)ret; 7587 } 7588 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7589 return (len); 7590 } 7591 7592 /* 7593 * Statement for p_cowcnts/p_lckcnts. 7594 * 7595 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7596 * irrespective of the following factors or anything else: 7597 * 7598 * (1) anon slots are populated or not 7599 * (2) cow is broken or not 7600 * (3) refcnt on ap is 1 or greater than 1 7601 * 7602 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7603 * and munlock. 7604 * 7605 * 7606 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7607 * 7608 * if vpage has PROT_WRITE 7609 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7610 * else 7611 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7612 * 7613 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7614 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7615 * 7616 * We may also break COW if softlocking on read access in the physio case. 7617 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7618 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7619 * vpage doesn't have PROT_WRITE. 7620 * 7621 * 7622 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7623 * 7624 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7625 * increment p_lckcnt by calling page_subclaim() which takes care of 7626 * availrmem accounting and p_lckcnt overflow. 7627 * 7628 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7629 * increment p_cowcnt by calling page_addclaim() which takes care of 7630 * availrmem availability and p_cowcnt overflow. 7631 */ 7632 7633 /* 7634 * Lock down (or unlock) pages mapped by this segment. 7635 * 7636 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7637 * At fault time they will be relocated into larger pages. 
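 * Example of the claim accounting described in the Statement
 * above (illustrative only):
 *
 *	mlock() of a MAP_PRIVATE, PROT_WRITE page -> p_cowcnt++
 *	mlock() of any other page                 -> p_lckcnt++
 *
 * munlock() drops whichever counter was taken, based on the
 * vpage protections at the time of the unlock.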
7638 */ 7639 static int 7640 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7641 int attr, int op, ulong_t *lockmap, size_t pos) 7642 { 7643 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7644 struct vpage *vpp; 7645 struct vpage *evp; 7646 page_t *pp; 7647 u_offset_t offset; 7648 u_offset_t off; 7649 int segtype; 7650 int pageprot = 0; 7651 int claim; 7652 struct vnode *vp; 7653 ulong_t anon_index = 0; 7654 struct anon_map *amp; 7655 struct anon *ap; 7656 struct vattr va; 7657 anon_sync_obj_t cookie; 7658 struct kshmid *sp = NULL; 7659 struct proc *p = curproc; 7660 kproject_t *proj = NULL; 7661 int chargeproc = 1; 7662 size_t locked_bytes = 0; 7663 size_t unlocked_bytes = 0; 7664 int err = 0; 7665 7666 /* 7667 * Hold write lock on address space because may split or concatenate 7668 * segments 7669 */ 7670 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 7671 7672 /* 7673 * If this is a shm, use shm's project and zone, else use 7674 * project and zone of calling process 7675 */ 7676 7677 /* Determine if this segment backs a sysV shm */ 7678 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7679 ASSERT(svd->type == MAP_SHARED); 7680 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7681 sp = svd->amp->a_sp; 7682 proj = sp->shm_perm.ipc_proj; 7683 chargeproc = 0; 7684 } 7685 7686 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7687 if (attr) { 7688 pageprot = attr & ~(SHARED|PRIVATE); 7689 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7690 7691 /* 7692 * We are done if the segment types don't match 7693 * or if we have segment level protections and 7694 * they don't match. 7695 */ 7696 if (svd->type != segtype) { 7697 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7698 return (0); 7699 } 7700 if (svd->pageprot == 0 && svd->prot != pageprot) { 7701 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7702 return (0); 7703 } 7704 } 7705 7706 if (op == MC_LOCK) { 7707 if (svd->tr_state == SEGVN_TR_INIT) { 7708 svd->tr_state = SEGVN_TR_OFF; 7709 } else if (svd->tr_state == SEGVN_TR_ON) { 7710 ASSERT(svd->amp != NULL); 7711 segvn_textunrepl(seg, 0); 7712 ASSERT(svd->amp == NULL && 7713 svd->tr_state == SEGVN_TR_OFF); 7714 } 7715 } 7716 7717 /* 7718 * If we're locking, then we must create a vpage structure if 7719 * none exists. If we're unlocking, then check to see if there 7720 * is a vpage -- if not, then we could not have locked anything. 7721 */ 7722 7723 if ((vpp = svd->vpage) == NULL) { 7724 if (op == MC_LOCK) { 7725 segvn_vpage(seg); 7726 if (svd->vpage == NULL) { 7727 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7728 return (ENOMEM); 7729 } 7730 } else { 7731 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7732 return (0); 7733 } 7734 } 7735 7736 /* 7737 * The anonymous data vector (i.e., previously 7738 * unreferenced mapping to swap space) can be allocated 7739 * by lazily testing for its existence. 
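 * i.e. a purely anonymous segment that has never been faulted
 * gets its anon_map created here, so the MC_LOCK loop below has
 * anon slots to fill in and pages to lock down.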
7740 */ 7741 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7742 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7743 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7744 svd->amp->a_szc = seg->s_szc; 7745 } 7746 7747 if ((amp = svd->amp) != NULL) { 7748 anon_index = svd->anon_index + seg_page(seg, addr); 7749 } 7750 7751 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7752 evp = &svd->vpage[seg_page(seg, addr + len)]; 7753 7754 if (sp != NULL) 7755 mutex_enter(&sp->shm_mlock); 7756 7757 /* determine number of unlocked bytes in range for lock operation */ 7758 if (op == MC_LOCK) { 7759 7760 if (sp == NULL) { 7761 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7762 vpp++) { 7763 if (!VPP_ISPPLOCK(vpp)) 7764 unlocked_bytes += PAGESIZE; 7765 } 7766 } else { 7767 ulong_t i_idx, i_edx; 7768 anon_sync_obj_t i_cookie; 7769 struct anon *i_ap; 7770 struct vnode *i_vp; 7771 u_offset_t i_off; 7772 7773 /* Only count sysV pages once for locked memory */ 7774 i_edx = svd->anon_index + seg_page(seg, addr + len); 7775 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7776 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7777 anon_array_enter(amp, i_idx, &i_cookie); 7778 i_ap = anon_get_ptr(amp->ahp, i_idx); 7779 if (i_ap == NULL) { 7780 unlocked_bytes += PAGESIZE; 7781 anon_array_exit(&i_cookie); 7782 continue; 7783 } 7784 swap_xlate(i_ap, &i_vp, &i_off); 7785 anon_array_exit(&i_cookie); 7786 pp = page_lookup(i_vp, i_off, SE_SHARED); 7787 if (pp == NULL) { 7788 unlocked_bytes += PAGESIZE; 7789 continue; 7790 } else if (pp->p_lckcnt == 0) 7791 unlocked_bytes += PAGESIZE; 7792 page_unlock(pp); 7793 } 7794 ANON_LOCK_EXIT(&->a_rwlock); 7795 } 7796 7797 mutex_enter(&p->p_lock); 7798 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7799 chargeproc); 7800 mutex_exit(&p->p_lock); 7801 7802 if (err) { 7803 if (sp != NULL) 7804 mutex_exit(&sp->shm_mlock); 7805 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7806 return (err); 7807 } 7808 } 7809 /* 7810 * Loop over all pages in the range. Process if we're locking and 7811 * page has not already been locked in this mapping; or if we're 7812 * unlocking and the page has been locked. 7813 */ 7814 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7815 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7816 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7817 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7818 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7819 7820 if (amp != NULL) 7821 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7822 /* 7823 * If this isn't a MAP_NORESERVE segment and 7824 * we're locking, allocate anon slots if they 7825 * don't exist. The page is brought in later on. 7826 */ 7827 if (op == MC_LOCK && svd->vp == NULL && 7828 ((svd->flags & MAP_NORESERVE) == 0) && 7829 amp != NULL && 7830 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7831 == NULL)) { 7832 anon_array_enter(amp, anon_index, &cookie); 7833 7834 if ((ap = anon_get_ptr(amp->ahp, 7835 anon_index)) == NULL) { 7836 pp = anon_zero(seg, addr, &ap, 7837 svd->cred); 7838 if (pp == NULL) { 7839 anon_array_exit(&cookie); 7840 ANON_LOCK_EXIT(&->a_rwlock); 7841 err = ENOMEM; 7842 goto out; 7843 } 7844 ASSERT(anon_get_ptr(amp->ahp, 7845 anon_index) == NULL); 7846 (void) anon_set_ptr(amp->ahp, 7847 anon_index, ap, ANON_SLEEP); 7848 page_unlock(pp); 7849 } 7850 anon_array_exit(&cookie); 7851 } 7852 7853 /* 7854 * Get name for page, accounting for 7855 * existence of private copy. 
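 * If an anon slot exists the page is named by the anon layer's
 * <swap vnode, offset> pair from swap_xlate(); otherwise it is
 * named by the segment's backing vnode and the offset computed
 * above.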
7856 */ 7857 ap = NULL; 7858 if (amp != NULL) { 7859 anon_array_enter(amp, anon_index, &cookie); 7860 ap = anon_get_ptr(amp->ahp, anon_index); 7861 if (ap != NULL) { 7862 swap_xlate(ap, &vp, &off); 7863 } else { 7864 if (svd->vp == NULL && 7865 (svd->flags & MAP_NORESERVE)) { 7866 anon_array_exit(&cookie); 7867 ANON_LOCK_EXIT(&->a_rwlock); 7868 continue; 7869 } 7870 vp = svd->vp; 7871 off = offset; 7872 } 7873 if (op != MC_LOCK || ap == NULL) { 7874 anon_array_exit(&cookie); 7875 ANON_LOCK_EXIT(&->a_rwlock); 7876 } 7877 } else { 7878 vp = svd->vp; 7879 off = offset; 7880 } 7881 7882 /* 7883 * Get page frame. It's ok if the page is 7884 * not available when we're unlocking, as this 7885 * may simply mean that a page we locked got 7886 * truncated out of existence after we locked it. 7887 * 7888 * Invoke VOP_GETPAGE() to obtain the page struct 7889 * since we may need to read it from disk if its 7890 * been paged out. 7891 */ 7892 if (op != MC_LOCK) 7893 pp = page_lookup(vp, off, SE_SHARED); 7894 else { 7895 page_t *pl[1 + 1]; 7896 int error; 7897 7898 ASSERT(vp != NULL); 7899 7900 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7901 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7902 S_OTHER, svd->cred, NULL); 7903 7904 if (error && ap != NULL) { 7905 anon_array_exit(&cookie); 7906 ANON_LOCK_EXIT(&->a_rwlock); 7907 } 7908 7909 /* 7910 * If the error is EDEADLK then we must bounce 7911 * up and drop all vm subsystem locks and then 7912 * retry the operation later 7913 * This behavior is a temporary measure because 7914 * ufs/sds logging is badly designed and will 7915 * deadlock if we don't allow this bounce to 7916 * happen. The real solution is to re-design 7917 * the logging code to work properly. See bug 7918 * 4125102 for details of the problem. 7919 */ 7920 if (error == EDEADLK) { 7921 err = error; 7922 goto out; 7923 } 7924 /* 7925 * Quit if we fail to fault in the page. Treat 7926 * the failure as an error, unless the addr 7927 * is mapped beyond the end of a file. 7928 */ 7929 if (error && svd->vp) { 7930 va.va_mask = AT_SIZE; 7931 if (VOP_GETATTR(svd->vp, &va, 0, 7932 svd->cred, NULL) != 0) { 7933 err = EIO; 7934 goto out; 7935 } 7936 if (btopr(va.va_size) >= 7937 btopr(off + 1)) { 7938 err = EIO; 7939 goto out; 7940 } 7941 goto out; 7942 7943 } else if (error) { 7944 err = EIO; 7945 goto out; 7946 } 7947 pp = pl[0]; 7948 ASSERT(pp != NULL); 7949 } 7950 7951 /* 7952 * See Statement at the beginning of this routine. 7953 * 7954 * claim is always set if MAP_PRIVATE and PROT_WRITE 7955 * irrespective of following factors: 7956 * 7957 * (1) anon slots are populated or not 7958 * (2) cow is broken or not 7959 * (3) refcnt on ap is 1 or greater than 1 7960 * 7961 * See 4140683 for details 7962 */ 7963 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7964 (svd->type == MAP_PRIVATE)); 7965 7966 /* 7967 * Perform page-level operation appropriate to 7968 * operation. If locking, undo the SOFTLOCK 7969 * performed to bring the page into memory 7970 * after setting the lock. If unlocking, 7971 * and no page was found, account for the claim 7972 * separately. 
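 * A failed page_pp_lock() (e.g. too many pages already locked)
 * aborts the operation with EAGAIN; the locked-memory rctl
 * charge for pages that were not locked is credited back at
 * the "out:" label.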
7973 */ 7974 if (op == MC_LOCK) { 7975 int ret = 1; /* Assume success */ 7976 7977 ASSERT(!VPP_ISPPLOCK(vpp)); 7978 7979 ret = page_pp_lock(pp, claim, 0); 7980 if (ap != NULL) { 7981 if (ap->an_pvp != NULL) { 7982 anon_swap_free(ap, pp); 7983 } 7984 anon_array_exit(&cookie); 7985 ANON_LOCK_EXIT(&->a_rwlock); 7986 } 7987 if (ret == 0) { 7988 /* locking page failed */ 7989 page_unlock(pp); 7990 err = EAGAIN; 7991 goto out; 7992 } 7993 VPP_SETPPLOCK(vpp); 7994 if (sp != NULL) { 7995 if (pp->p_lckcnt == 1) 7996 locked_bytes += PAGESIZE; 7997 } else 7998 locked_bytes += PAGESIZE; 7999 8000 if (lockmap != (ulong_t *)NULL) 8001 BT_SET(lockmap, pos); 8002 8003 page_unlock(pp); 8004 } else { 8005 ASSERT(VPP_ISPPLOCK(vpp)); 8006 if (pp != NULL) { 8007 /* sysV pages should be locked */ 8008 ASSERT(sp == NULL || pp->p_lckcnt > 0); 8009 page_pp_unlock(pp, claim, 0); 8010 if (sp != NULL) { 8011 if (pp->p_lckcnt == 0) 8012 unlocked_bytes 8013 += PAGESIZE; 8014 } else 8015 unlocked_bytes += PAGESIZE; 8016 page_unlock(pp); 8017 } else { 8018 ASSERT(sp == NULL); 8019 unlocked_bytes += PAGESIZE; 8020 } 8021 VPP_CLRPPLOCK(vpp); 8022 } 8023 } 8024 } 8025 out: 8026 if (op == MC_LOCK) { 8027 /* Credit back bytes that did not get locked */ 8028 if ((unlocked_bytes - locked_bytes) > 0) { 8029 if (proj == NULL) 8030 mutex_enter(&p->p_lock); 8031 rctl_decr_locked_mem(p, proj, 8032 (unlocked_bytes - locked_bytes), chargeproc); 8033 if (proj == NULL) 8034 mutex_exit(&p->p_lock); 8035 } 8036 8037 } else { 8038 /* Account bytes that were unlocked */ 8039 if (unlocked_bytes > 0) { 8040 if (proj == NULL) 8041 mutex_enter(&p->p_lock); 8042 rctl_decr_locked_mem(p, proj, unlocked_bytes, 8043 chargeproc); 8044 if (proj == NULL) 8045 mutex_exit(&p->p_lock); 8046 } 8047 } 8048 if (sp != NULL) 8049 mutex_exit(&sp->shm_mlock); 8050 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8051 8052 return (err); 8053 } 8054 8055 /* 8056 * Set advice from user for specified pages 8057 * There are 10 types of advice: 8058 * MADV_NORMAL - Normal (default) behavior (whatever that is) 8059 * MADV_RANDOM - Random page references 8060 * do not allow readahead or 'klustering' 8061 * MADV_SEQUENTIAL - Sequential page references 8062 * Pages previous to the one currently being 8063 * accessed (determined by fault) are 'not needed' 8064 * and are freed immediately 8065 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 8066 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 8067 * MADV_FREE - Contents can be discarded 8068 * MADV_ACCESS_DEFAULT- Default access 8069 * MADV_ACCESS_LWP - Next LWP will access heavily 8070 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 8071 * MADV_PURGE - Contents will be immediately discarded 8072 */ 8073 static int 8074 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8075 { 8076 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8077 size_t page; 8078 int err = 0; 8079 int already_set; 8080 struct anon_map *amp; 8081 ulong_t anon_index; 8082 struct seg *next; 8083 lgrp_mem_policy_t policy; 8084 struct seg *prev; 8085 struct vnode *vp; 8086 8087 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 8088 8089 /* 8090 * In case of MADV_FREE/MADV_PURGE, we won't be modifying any segment 8091 * private data structures; so, we only need to grab READER's lock 8092 */ 8093 if (behav != MADV_FREE && behav != MADV_PURGE) { 8094 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8095 if (svd->tr_state != SEGVN_TR_OFF) { 8096 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8097 
return (0); 8098 } 8099 } else { 8100 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8101 } 8102 8103 /* 8104 * Large pages are assumed to be only turned on when accesses to the 8105 * segment's address range have spatial and temporal locality. That 8106 * justifies ignoring MADV_SEQUENTIAL for large page segments. 8107 * Also, ignore advice affecting lgroup memory allocation 8108 * if don't need to do lgroup optimizations on this system 8109 */ 8110 8111 if ((behav == MADV_SEQUENTIAL && 8112 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 8113 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 8114 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 8115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8116 return (0); 8117 } 8118 8119 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 8120 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 8121 /* 8122 * Since we are going to unload hat mappings 8123 * we first have to flush the cache. Otherwise 8124 * this might lead to system panic if another 8125 * thread is doing physio on the range whose 8126 * mappings are unloaded by madvise(3C). 8127 */ 8128 if (svd->softlockcnt > 0) { 8129 /* 8130 * If this is shared segment non 0 softlockcnt 8131 * means locked pages are still in use. 8132 */ 8133 if (svd->type == MAP_SHARED) { 8134 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8135 return (EAGAIN); 8136 } 8137 /* 8138 * Since we do have the segvn writers lock 8139 * nobody can fill the cache with entries 8140 * belonging to this seg during the purge. 8141 * The flush either succeeds or we still 8142 * have pending I/Os. In the later case, 8143 * madvise(3C) fails. 8144 */ 8145 segvn_purge(seg); 8146 if (svd->softlockcnt > 0) { 8147 /* 8148 * Since madvise(3C) is advisory and 8149 * it's not part of UNIX98, madvise(3C) 8150 * failure here doesn't cause any hardship. 8151 * Note that we don't block in "as" layer. 8152 */ 8153 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8154 return (EAGAIN); 8155 } 8156 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 8157 svd->amp->a_softlockcnt > 0) { 8158 /* 8159 * Try to purge this amp's entries from pcache. It 8160 * will succeed only if other segments that share the 8161 * amp have no outstanding softlock's. 8162 */ 8163 segvn_purge(seg); 8164 } 8165 } 8166 8167 amp = svd->amp; 8168 vp = svd->vp; 8169 if (behav == MADV_FREE || behav == MADV_PURGE) { 8170 pgcnt_t purged; 8171 8172 if (behav == MADV_FREE && (vp != NULL || amp == NULL)) { 8173 /* 8174 * MADV_FREE is not supported for segments with an 8175 * underlying object; if anonmap is NULL, anon slots 8176 * are not yet populated and there is nothing for us 8177 * to do. As MADV_FREE is advisory, we don't return an 8178 * error in either case. 8179 */ 8180 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8181 return (0); 8182 } 8183 8184 if (amp == NULL) { 8185 /* 8186 * If we're here with a NULL anonmap, it's because we 8187 * are doing a MADV_PURGE. We have nothing to do, but 8188 * because MADV_PURGE isn't merely advisory, we return 8189 * an error in this case. 8190 */ 8191 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8192 return (EBUSY); 8193 } 8194 8195 segvn_purge(seg); 8196 8197 page = seg_page(seg, addr); 8198 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 8199 err = anon_disclaim(amp, 8200 svd->anon_index + page, len, behav, &purged); 8201 8202 if (purged != 0 && (svd->flags & MAP_NORESERVE)) { 8203 /* 8204 * If we purged pages on a MAP_NORESERVE mapping, we 8205 * need to be sure to now unreserve our reserved swap. 
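 * For example (illustrative figures, not from the original comment): purging
 * 16 pages on a system with a 4K PAGESIZE yields bytes = ptob(16) = 64K,
 * which is handed back to the zone via anon_unresv_zone() and subtracted
 * from both svd->swresv and seg->s_as->a_resvsize in the code below.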
8206 * (We use the atomic operations to manipulate our 8207 * segment and address space counters because we only 8208 * have the corresponding locks held as reader, not 8209 * writer.) 8210 */ 8211 ssize_t bytes = ptob(purged); 8212 8213 anon_unresv_zone(bytes, seg->s_as->a_proc->p_zone); 8214 atomic_add_long(&svd->swresv, -bytes); 8215 atomic_add_long(&seg->s_as->a_resvsize, -bytes); 8216 } 8217 8218 ANON_LOCK_EXIT(&->a_rwlock); 8219 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8220 8221 /* 8222 * MADV_PURGE and MADV_FREE differ in their return semantics: 8223 * because MADV_PURGE is designed to be bug-for-bug compatible 8224 * with its clumsy Linux forebear, it will fail where MADV_FREE 8225 * does not. 8226 */ 8227 return (behav == MADV_PURGE ? err : 0); 8228 } 8229 8230 /* 8231 * If advice is to be applied to entire segment, 8232 * use advice field in seg_data structure 8233 * otherwise use appropriate vpage entry. 8234 */ 8235 if ((addr == seg->s_base) && (len == seg->s_size)) { 8236 switch (behav) { 8237 case MADV_ACCESS_LWP: 8238 case MADV_ACCESS_MANY: 8239 case MADV_ACCESS_DEFAULT: 8240 /* 8241 * Set memory allocation policy for this segment 8242 */ 8243 policy = lgrp_madv_to_policy(behav, len, svd->type); 8244 if (svd->type == MAP_SHARED) 8245 already_set = lgrp_shm_policy_set(policy, amp, 8246 svd->anon_index, vp, svd->offset, len); 8247 else { 8248 /* 8249 * For private memory, need writers lock on 8250 * address space because the segment may be 8251 * split or concatenated when changing policy 8252 */ 8253 if (AS_READ_HELD(seg->s_as)) { 8254 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8255 return (IE_RETRY); 8256 } 8257 8258 already_set = lgrp_privm_policy_set(policy, 8259 &svd->policy_info, len); 8260 } 8261 8262 /* 8263 * If policy set already and it shouldn't be reapplied, 8264 * don't do anything. 8265 */ 8266 if (already_set && 8267 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8268 break; 8269 8270 /* 8271 * Mark any existing pages in given range for 8272 * migration 8273 */ 8274 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8275 vp, svd->offset, 1); 8276 8277 /* 8278 * If same policy set already or this is a shared 8279 * memory segment, don't need to try to concatenate 8280 * segment with adjacent ones. 8281 */ 8282 if (already_set || svd->type == MAP_SHARED) 8283 break; 8284 8285 /* 8286 * Try to concatenate this segment with previous 8287 * one and next one, since we changed policy for 8288 * this one and it may be compatible with adjacent 8289 * ones now. 
8290 */ 8291 prev = AS_SEGPREV(seg->s_as, seg); 8292 next = AS_SEGNEXT(seg->s_as, seg); 8293 8294 if (next && next->s_ops == &segvn_ops && 8295 addr + len == next->s_base) 8296 (void) segvn_concat(seg, next, 1); 8297 8298 if (prev && prev->s_ops == &segvn_ops && 8299 addr == prev->s_base + prev->s_size) { 8300 /* 8301 * Drop lock for private data of current 8302 * segment before concatenating (deleting) it 8303 * and return IE_REATTACH to tell as_ctl() that 8304 * current segment has changed 8305 */ 8306 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8307 if (!segvn_concat(prev, seg, 1)) 8308 err = IE_REATTACH; 8309 8310 return (err); 8311 } 8312 break; 8313 8314 case MADV_SEQUENTIAL: 8315 /* 8316 * unloading mapping guarantees 8317 * detection in segvn_fault 8318 */ 8319 ASSERT(seg->s_szc == 0); 8320 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8321 hat_unload(seg->s_as->a_hat, addr, len, 8322 HAT_UNLOAD); 8323 /* FALLTHROUGH */ 8324 case MADV_NORMAL: 8325 case MADV_RANDOM: 8326 svd->advice = (uchar_t)behav; 8327 svd->pageadvice = 0; 8328 break; 8329 case MADV_WILLNEED: /* handled in memcntl */ 8330 case MADV_DONTNEED: /* handled in memcntl */ 8331 case MADV_FREE: /* handled above */ 8332 case MADV_PURGE: /* handled above */ 8333 break; 8334 default: 8335 err = EINVAL; 8336 } 8337 } else { 8338 caddr_t eaddr; 8339 struct seg *new_seg; 8340 struct segvn_data *new_svd = NULL; 8341 u_offset_t off; 8342 caddr_t oldeaddr; 8343 8344 page = seg_page(seg, addr); 8345 8346 segvn_vpage(seg); 8347 if (svd->vpage == NULL) { 8348 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8349 return (ENOMEM); 8350 } 8351 8352 switch (behav) { 8353 struct vpage *bvpp, *evpp; 8354 8355 case MADV_ACCESS_LWP: 8356 case MADV_ACCESS_MANY: 8357 case MADV_ACCESS_DEFAULT: 8358 /* 8359 * Set memory allocation policy for portion of this 8360 * segment 8361 */ 8362 8363 /* 8364 * Align address and length of advice to page 8365 * boundaries for large pages 8366 */ 8367 if (seg->s_szc != 0) { 8368 size_t pgsz; 8369 8370 pgsz = page_get_pagesize(seg->s_szc); 8371 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8372 len = P2ROUNDUP(len, pgsz); 8373 } 8374 8375 /* 8376 * Check to see whether policy is set already 8377 */ 8378 policy = lgrp_madv_to_policy(behav, len, svd->type); 8379 8380 anon_index = svd->anon_index + page; 8381 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8382 8383 if (svd->type == MAP_SHARED) 8384 already_set = lgrp_shm_policy_set(policy, amp, 8385 anon_index, vp, off, len); 8386 else 8387 already_set = 8388 (policy == svd->policy_info.mem_policy); 8389 8390 /* 8391 * If policy set already and it shouldn't be reapplied, 8392 * don't do anything. 
8393 */ 8394 if (already_set && 8395 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8396 break; 8397 8398 /* 8399 * For private memory, need writers lock on 8400 * address space because the segment may be 8401 * split or concatenated when changing policy 8402 */ 8403 if (svd->type == MAP_PRIVATE && 8404 AS_READ_HELD(seg->s_as)) { 8405 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8406 return (IE_RETRY); 8407 } 8408 8409 /* 8410 * Mark any existing pages in given range for 8411 * migration 8412 */ 8413 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8414 vp, svd->offset, 1); 8415 8416 /* 8417 * Don't need to try to split or concatenate 8418 * segments, since policy is same or this is a shared 8419 * memory segment 8420 */ 8421 if (already_set || svd->type == MAP_SHARED) 8422 break; 8423 8424 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8425 ASSERT(svd->amp == NULL); 8426 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8427 ASSERT(svd->softlockcnt == 0); 8428 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8429 HAT_REGION_TEXT); 8430 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8431 } 8432 8433 /* 8434 * Split off new segment if advice only applies to a 8435 * portion of existing segment starting in middle 8436 */ 8437 new_seg = NULL; 8438 eaddr = addr + len; 8439 oldeaddr = seg->s_base + seg->s_size; 8440 if (addr > seg->s_base) { 8441 /* 8442 * Must flush I/O page cache 8443 * before splitting segment 8444 */ 8445 if (svd->softlockcnt > 0) 8446 segvn_purge(seg); 8447 8448 /* 8449 * Split segment and return IE_REATTACH to tell 8450 * as_ctl() that current segment changed 8451 */ 8452 new_seg = segvn_split_seg(seg, addr); 8453 new_svd = (struct segvn_data *)new_seg->s_data; 8454 err = IE_REATTACH; 8455 8456 /* 8457 * If new segment ends where old one 8458 * did, try to concatenate the new 8459 * segment with next one. 8460 */ 8461 if (eaddr == oldeaddr) { 8462 /* 8463 * Set policy for new segment 8464 */ 8465 (void) lgrp_privm_policy_set(policy, 8466 &new_svd->policy_info, 8467 new_seg->s_size); 8468 8469 next = AS_SEGNEXT(new_seg->s_as, 8470 new_seg); 8471 8472 if (next && 8473 next->s_ops == &segvn_ops && 8474 eaddr == next->s_base) 8475 (void) segvn_concat(new_seg, 8476 next, 1); 8477 } 8478 } 8479 8480 /* 8481 * Split off end of existing segment if advice only 8482 * applies to a portion of segment ending before 8483 * end of the existing segment 8484 */ 8485 if (eaddr < oldeaddr) { 8486 /* 8487 * Must flush I/O page cache 8488 * before splitting segment 8489 */ 8490 if (svd->softlockcnt > 0) 8491 segvn_purge(seg); 8492 8493 /* 8494 * If beginning of old segment was already 8495 * split off, use new segment to split end off 8496 * from. 8497 */ 8498 if (new_seg != NULL && new_seg != seg) { 8499 /* 8500 * Split segment 8501 */ 8502 (void) segvn_split_seg(new_seg, eaddr); 8503 8504 /* 8505 * Set policy for new segment 8506 */ 8507 (void) lgrp_privm_policy_set(policy, 8508 &new_svd->policy_info, 8509 new_seg->s_size); 8510 } else { 8511 /* 8512 * Split segment and return IE_REATTACH 8513 * to tell as_ctl() that current 8514 * segment changed 8515 */ 8516 (void) segvn_split_seg(seg, eaddr); 8517 err = IE_REATTACH; 8518 8519 (void) lgrp_privm_policy_set(policy, 8520 &svd->policy_info, seg->s_size); 8521 8522 /* 8523 * If new segment starts where old one 8524 * did, try to concatenate it with 8525 * previous segment. 
8526 */ 8527 if (addr == seg->s_base) { 8528 prev = AS_SEGPREV(seg->s_as, 8529 seg); 8530 8531 /* 8532 * Drop lock for private data 8533 * of current segment before 8534 * concatenating (deleting) it 8535 */ 8536 if (prev && 8537 prev->s_ops == 8538 &segvn_ops && 8539 addr == prev->s_base + 8540 prev->s_size) { 8541 SEGVN_LOCK_EXIT( 8542 seg->s_as, 8543 &svd->lock); 8544 (void) segvn_concat( 8545 prev, seg, 1); 8546 return (err); 8547 } 8548 } 8549 } 8550 } 8551 break; 8552 case MADV_SEQUENTIAL: 8553 ASSERT(seg->s_szc == 0); 8554 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8555 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8556 /* FALLTHROUGH */ 8557 case MADV_NORMAL: 8558 case MADV_RANDOM: 8559 bvpp = &svd->vpage[page]; 8560 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8561 for (; bvpp < evpp; bvpp++) 8562 VPP_SETADVICE(bvpp, behav); 8563 svd->advice = MADV_NORMAL; 8564 break; 8565 case MADV_WILLNEED: /* handled in memcntl */ 8566 case MADV_DONTNEED: /* handled in memcntl */ 8567 case MADV_FREE: /* handled above */ 8568 case MADV_PURGE: /* handled above */ 8569 break; 8570 default: 8571 err = EINVAL; 8572 } 8573 } 8574 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8575 return (err); 8576 } 8577 8578 /* 8579 * There is one kind of inheritance that can be specified for pages: 8580 * 8581 * SEGP_INH_ZERO - Pages should be zeroed in the child 8582 */ 8583 static int 8584 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8585 { 8586 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8587 struct vpage *bvpp, *evpp; 8588 size_t page; 8589 int ret = 0; 8590 8591 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 8592 8593 /* Can't support something we don't know about */ 8594 if (behav != SEGP_INH_ZERO) 8595 return (ENOTSUP); 8596 8597 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8598 8599 /* 8600 * This must be a straightforward anonymous segment that is mapped 8601 * privately and is not backed by a vnode. 8602 */ 8603 if (svd->tr_state != SEGVN_TR_OFF || 8604 svd->type != MAP_PRIVATE || 8605 svd->vp != NULL) { 8606 ret = EINVAL; 8607 goto out; 8608 } 8609 8610 /* 8611 * If the entire segment has been marked as inherit zero, then no reason 8612 * to do anything else. 8613 */ 8614 if (svd->svn_inz == SEGVN_INZ_ALL) { 8615 ret = 0; 8616 goto out; 8617 } 8618 8619 /* 8620 * If this applies to the entire segment, simply mark it and we're done. 8621 */ 8622 if ((addr == seg->s_base) && (len == seg->s_size)) { 8623 svd->svn_inz = SEGVN_INZ_ALL; 8624 ret = 0; 8625 goto out; 8626 } 8627 8628 /* 8629 * We've been asked to mark a subset of this segment as inherit zero, 8630 * therefore we need to mainpulate its vpages. 8631 */ 8632 if (svd->vpage == NULL) { 8633 segvn_vpage(seg); 8634 if (svd->vpage == NULL) { 8635 ret = ENOMEM; 8636 goto out; 8637 } 8638 } 8639 8640 svd->svn_inz = SEGVN_INZ_VPP; 8641 page = seg_page(seg, addr); 8642 bvpp = &svd->vpage[page]; 8643 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8644 for (; bvpp < evpp; bvpp++) 8645 VPP_SETINHZERO(bvpp); 8646 ret = 0; 8647 8648 out: 8649 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8650 return (ret); 8651 } 8652 8653 /* 8654 * Create a vpage structure for this seg. 8655 */ 8656 static void 8657 segvn_vpage(struct seg *seg) 8658 { 8659 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8660 struct vpage *vp, *evp; 8661 static pgcnt_t page_limit = 0; 8662 8663 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8664 8665 /* 8666 * If no vpage structure exists, allocate one. 
Copy the protections 8667 * and the advice from the segment itself to the individual pages. 8668 */ 8669 if (svd->vpage == NULL) { 8670 /* 8671 * Start by calculating the number of pages we must allocate to 8672 * track the per-page vpage structs needed for this entire 8673 * segment. If we know now that it will require more than our 8674 * heuristic for the maximum amount of kmem we can consume then 8675 * fail. We do this here, instead of trying to detect this deep 8676 * in page_resv and propagating the error up, since the entire 8677 * memory allocation stack is not amenable to passing this 8678 * back. Instead, it wants to keep trying. 8679 * 8680 * As a heuristic we set a page limit of 5/8s of total_pages 8681 * for this allocation. We use shifts so that no floating 8682 * point conversion takes place and only need to do the 8683 * calculation once. 8684 */ 8685 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage); 8686 pgcnt_t npages = mem_needed >> PAGESHIFT; 8687 8688 if (page_limit == 0) 8689 page_limit = (total_pages >> 1) + (total_pages >> 3); 8690 8691 if (npages > page_limit) 8692 return; 8693 8694 svd->pageadvice = 1; 8695 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP); 8696 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8697 for (vp = svd->vpage; vp < evp; vp++) { 8698 VPP_SETPROT(vp, svd->prot); 8699 VPP_SETADVICE(vp, svd->advice); 8700 } 8701 } 8702 } 8703 8704 /* 8705 * Dump the pages belonging to this segvn segment. 8706 */ 8707 static void 8708 segvn_dump(struct seg *seg) 8709 { 8710 struct segvn_data *svd; 8711 page_t *pp; 8712 struct anon_map *amp; 8713 ulong_t anon_index = 0; 8714 struct vnode *vp; 8715 u_offset_t off, offset; 8716 pfn_t pfn; 8717 pgcnt_t page, npages; 8718 caddr_t addr; 8719 8720 npages = seg_pages(seg); 8721 svd = (struct segvn_data *)seg->s_data; 8722 vp = svd->vp; 8723 off = offset = svd->offset; 8724 addr = seg->s_base; 8725 8726 if ((amp = svd->amp) != NULL) { 8727 anon_index = svd->anon_index; 8728 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8729 } 8730 8731 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8732 struct anon *ap; 8733 int we_own_it = 0; 8734 8735 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8736 swap_xlate_nopanic(ap, &vp, &off); 8737 } else { 8738 vp = svd->vp; 8739 off = offset; 8740 } 8741 8742 /* 8743 * If pp == NULL, the page either does not exist 8744 * or is exclusively locked. So determine if it 8745 * exists before searching for it. 8746 */ 8747 8748 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8749 we_own_it = 1; 8750 else 8751 pp = page_exists(vp, off); 8752 8753 if (pp) { 8754 pfn = page_pptonum(pp); 8755 dump_addpage(seg->s_as, addr, pfn); 8756 if (we_own_it) 8757 page_unlock(pp); 8758 } 8759 addr += PAGESIZE; 8760 dump_timeleft = dump_timeout; 8761 } 8762 8763 if (amp != NULL) 8764 ANON_LOCK_EXIT(&amp->a_rwlock); 8765 } 8766 8767 #ifdef DEBUG 8768 static uint32_t segvn_pglock_mtbf = 0; 8769 #endif 8770 8771 #define PCACHE_SHWLIST ((page_t *)-2) 8772 #define NOPCACHE_SHWLIST ((page_t *)-1) 8773 8774 /* 8775 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8776 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8777 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8778 * the same parts of the segment. Currently shadow list creation is only 8779 * supported for pure anon segments.
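 * (Editorial sketch, not part of the original comment: this interface is
 * normally reached via as_pagelock()/as_pageunlock(). Assuming those entry
 * points, with as/uaddr/len standing for the target address space, user
 * address and length, and with transfer_to_pages() as a hypothetical helper,
 * a caller doing raw I/O against locked user memory looks roughly like
 *
 *	page_t **pplist;
 *
 *	if (as_pagelock(as, &pplist, uaddr, len, S_WRITE) == 0) {
 *		transfer_to_pages(pplist, len);
 *		as_pageunlock(as, pplist, uaddr, len, S_WRITE);
 *	}
 *
 * Error handling and the fallback taken when the pagelock fails are
 * elided.)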
MAP_PRIVATE segment pcache entries are 8780 * tagged with segment pointer, starting virtual address and length. This 8781 * approach for MAP_SHARED segments may add many pcache entries for the same 8782 * set of pages and lead to long hash chains that decrease pcache lookup 8783 * performance. To avoid this issue for shared segments shared anon map and 8784 * starting anon index are used for pcache entry tagging. This allows all 8785 * segments to share pcache entries for the same anon range and reduces pcache 8786 * chain's length as well as memory overhead from duplicate shadow lists and 8787 * pcache entries. 8788 * 8789 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8790 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8791 * part of softlockcnt accounting is done differently for private and shared 8792 * segments. In private segment case softlock is only incremented when a new 8793 * shadow list is created but not when an existing one is found via 8794 * seg_plookup(). pcache entries have reference count incremented/decremented 8795 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8796 * reference count can be purged (and purging is needed before segment can be 8797 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8798 * decrement softlockcnt. Since in private segment case each of its pcache 8799 * entries only belongs to this segment we can expect that when 8800 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8801 * segment purge will succeed and softlockcnt will drop to 0. In shared 8802 * segment case reference count in pcache entry counts active locks from many 8803 * different segments so we can't expect segment purging to succeed even when 8804 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8805 * segment. To be able to determine when there're no pending pagelocks in 8806 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8807 * but instead softlockcnt is incremented and decremented for every 8808 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8809 * list was created or an existing one was found. When softlockcnt drops to 0 8810 * this segment no longer has any claims for pcached shadow lists and the 8811 * segment can be freed even if there're still active pcache entries 8812 * shared by this segment anon map. Shared segment pcache entries belong to 8813 * anon map and are typically removed when anon map is freed after all 8814 * processes destroy the segments that use this anon map. 8815 */ 8816 static int 8817 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8818 enum lock_type type, enum seg_rw rw) 8819 { 8820 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8821 size_t np; 8822 pgcnt_t adjustpages; 8823 pgcnt_t npages; 8824 ulong_t anon_index; 8825 uint_t protchk = (rw == S_READ) ? 
PROT_READ : PROT_WRITE; 8826 uint_t error; 8827 struct anon_map *amp; 8828 pgcnt_t anpgcnt; 8829 struct page **pplist, **pl, *pp; 8830 caddr_t a; 8831 size_t page; 8832 caddr_t lpgaddr, lpgeaddr; 8833 anon_sync_obj_t cookie; 8834 int anlock; 8835 struct anon_map *pamp; 8836 caddr_t paddr; 8837 seg_preclaim_cbfunc_t preclaim_callback; 8838 size_t pgsz; 8839 int use_pcache; 8840 size_t wlen = 0; 8841 uint_t pflags = 0; 8842 int sftlck_sbase = 0; 8843 int sftlck_send = 0; 8844 8845 #ifdef DEBUG 8846 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8847 hrtime_t ts = gethrtime(); 8848 if ((ts % segvn_pglock_mtbf) == 0) { 8849 return (ENOTSUP); 8850 } 8851 if ((ts % segvn_pglock_mtbf) == 1) { 8852 return (EFAULT); 8853 } 8854 } 8855 #endif 8856 8857 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8858 "segvn_pagelock: start seg %p addr %p", seg, addr); 8859 8860 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 8861 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8862 8863 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8864 8865 /* 8866 * for now we only support pagelock to anon memory. We would have to 8867 * check protections for vnode objects and call into the vnode driver. 8868 * That's too much for a fast path. Let the fault entry point handle 8869 * it. 8870 */ 8871 if (svd->vp != NULL) { 8872 if (type == L_PAGELOCK) { 8873 error = ENOTSUP; 8874 goto out; 8875 } 8876 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8877 } 8878 if ((amp = svd->amp) == NULL) { 8879 if (type == L_PAGELOCK) { 8880 error = EFAULT; 8881 goto out; 8882 } 8883 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8884 } 8885 if (rw != S_READ && rw != S_WRITE) { 8886 if (type == L_PAGELOCK) { 8887 error = ENOTSUP; 8888 goto out; 8889 } 8890 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8891 } 8892 8893 if (seg->s_szc != 0) { 8894 /* 8895 * We are adjusting the pagelock region to the large page size 8896 * boundary because the unlocked part of a large page cannot 8897 * be freed anyway unless all constituent pages of a large 8898 * page are locked. Bigger regions reduce pcache chain length 8899 * and improve lookup performance. The tradeoff is that the 8900 * very first segvn_pagelock() call for a given page is more 8901 * expensive if only 1 page_t is needed for IO. This is only 8902 * an issue if pcache entry doesn't get reused by several 8903 * subsequent calls. We optimize here for the case when pcache 8904 * is heavily used by repeated IOs to the same address range. 8905 * 8906 * Note segment's page size cannot change while we are holding 8907 * as lock. And then it cannot change while softlockcnt is 8908 * not 0. This will allow us to correctly recalculate large 8909 * page size region for the matching pageunlock/reclaim call 8910 * since as_pageunlock() caller must always match 8911 * as_pagelock() call's addr and len. 8912 * 8913 * For pageunlock *ppp points to the pointer of page_t that 8914 * corresponds to the real unadjusted start address. Similar 8915 * for pagelock *ppp must point to the pointer of page_t that 8916 * corresponds to the real unadjusted start address. 
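 *
 * (Illustrative example with assumed sizes, not taken from the original
 * comment: with a 4K PAGESIZE, a segment using 2M large pages and a caller
 * passing addr = lpgaddr + 24K,
 *
 *	adjustpages = btop(addr - lpgaddr) = btop(0x6000) = 6
 *
 * so, assuming the entry is inserted into pcache, *ppp = pplist + 6 below
 * points at the page_t for the caller's unadjusted start address while the
 * shadow list itself covers the whole large page aligned region.)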
8917 */ 8918 pgsz = page_get_pagesize(seg->s_szc); 8919 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8920 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8921 } else if (len < segvn_pglock_comb_thrshld) { 8922 lpgaddr = addr; 8923 lpgeaddr = addr + len; 8924 adjustpages = 0; 8925 pgsz = PAGESIZE; 8926 } else { 8927 /* 8928 * Align the address range of large enough requests to allow 8929 * combining of different shadow lists into 1 to reduce memory 8930 * overhead from potentially overlapping large shadow lists 8931 * (worst case is we have a 1MB IO into buffers with start 8932 * addresses separated by 4K). Alignment is only possible if 8933 * padded chunks have sufficient access permissions. Note 8934 * permissions won't change between L_PAGELOCK and 8935 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8936 * segvn_setprot() to wait until softlockcnt drops to 0. This 8937 * allows us to determine in L_PAGEUNLOCK the same range we 8938 * computed in L_PAGELOCK. 8939 * 8940 * If alignment is limited by segment ends set 8941 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8942 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8943 * per segment counters. In L_PAGEUNLOCK case decrease 8944 * softlockcnt_sbase/softlockcnt_send counters if 8945 * sftlck_sbase/sftlck_send flags are set. When 8946 * softlockcnt_sbase/softlockcnt_send are non 0 8947 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8948 * won't merge the segments. This restriction combined with 8949 * restriction on segment unmapping and splitting for segments 8950 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8951 * correctly determine the same range that was previously 8952 * locked by matching L_PAGELOCK. 8953 */ 8954 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8955 pgsz = PAGESIZE; 8956 if (svd->type == MAP_PRIVATE) { 8957 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8958 segvn_pglock_comb_balign); 8959 if (lpgaddr < seg->s_base) { 8960 lpgaddr = seg->s_base; 8961 sftlck_sbase = 1; 8962 } 8963 } else { 8964 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8965 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8966 if (aaix < svd->anon_index) { 8967 lpgaddr = seg->s_base; 8968 sftlck_sbase = 1; 8969 } else { 8970 lpgaddr = addr - ptob(aix - aaix); 8971 ASSERT(lpgaddr >= seg->s_base); 8972 } 8973 } 8974 if (svd->pageprot && lpgaddr != addr) { 8975 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8976 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8977 while (vp < evp) { 8978 if ((VPP_PROT(vp) & protchk) == 0) { 8979 break; 8980 } 8981 vp++; 8982 } 8983 if (vp < evp) { 8984 lpgaddr = addr; 8985 pflags = 0; 8986 } 8987 } 8988 lpgeaddr = addr + len; 8989 if (pflags) { 8990 if (svd->type == MAP_PRIVATE) { 8991 lpgeaddr = (caddr_t)P2ROUNDUP( 8992 (uintptr_t)lpgeaddr, 8993 segvn_pglock_comb_balign); 8994 } else { 8995 ulong_t aix = svd->anon_index + 8996 seg_page(seg, lpgeaddr); 8997 ulong_t aaix = P2ROUNDUP(aix, 8998 segvn_pglock_comb_palign); 8999 if (aaix < aix) { 9000 lpgeaddr = 0; 9001 } else { 9002 lpgeaddr += ptob(aaix - aix); 9003 } 9004 } 9005 if (lpgeaddr == 0 || 9006 lpgeaddr > seg->s_base + seg->s_size) { 9007 lpgeaddr = seg->s_base + seg->s_size; 9008 sftlck_send = 1; 9009 } 9010 } 9011 if (svd->pageprot && lpgeaddr != addr + len) { 9012 struct vpage *vp; 9013 struct vpage *evp; 9014 9015 vp = &svd->vpage[seg_page(seg, addr + len)]; 9016 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 9017 9018 while (vp < evp) { 9019 if ((VPP_PROT(vp) 
& protchk) == 0) { 9020 break; 9021 } 9022 vp++; 9023 } 9024 if (vp < evp) { 9025 lpgeaddr = addr + len; 9026 } 9027 } 9028 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 9029 } 9030 9031 /* 9032 * For MAP_SHARED segments we create pcache entries tagged by amp and 9033 * anon index so that we can share pcache entries with other segments 9034 * that map this amp. For private segments pcache entries are tagged 9035 * with segment and virtual address. 9036 */ 9037 if (svd->type == MAP_SHARED) { 9038 pamp = amp; 9039 paddr = (caddr_t)((lpgaddr - seg->s_base) + 9040 ptob(svd->anon_index)); 9041 preclaim_callback = shamp_reclaim; 9042 } else { 9043 pamp = NULL; 9044 paddr = lpgaddr; 9045 preclaim_callback = segvn_reclaim; 9046 } 9047 9048 if (type == L_PAGEUNLOCK) { 9049 VM_STAT_ADD(segvnvmstats.pagelock[0]); 9050 9051 /* 9052 * update hat ref bits for /proc. We need to make sure 9053 * that threads tracing the ref and mod bits of the 9054 * address space get the right data. 9055 * Note: page ref and mod bits are updated at reclaim time 9056 */ 9057 if (seg->s_as->a_vbits) { 9058 for (a = addr; a < addr + len; a += PAGESIZE) { 9059 if (rw == S_WRITE) { 9060 hat_setstat(seg->s_as, a, 9061 PAGESIZE, P_REF | P_MOD); 9062 } else { 9063 hat_setstat(seg->s_as, a, 9064 PAGESIZE, P_REF); 9065 } 9066 } 9067 } 9068 9069 /* 9070 * Check the shadow list entry after the last page used in 9071 * this IO request. If it's NOPCACHE_SHWLIST the shadow list 9072 * was not inserted into pcache and is not large page 9073 * adjusted. In this case call reclaim callback directly and 9074 * don't adjust the shadow list start and size for large 9075 * pages. 9076 */ 9077 npages = btop(len); 9078 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 9079 void *ptag; 9080 if (pamp != NULL) { 9081 ASSERT(svd->type == MAP_SHARED); 9082 ptag = (void *)pamp; 9083 paddr = (caddr_t)((addr - seg->s_base) + 9084 ptob(svd->anon_index)); 9085 } else { 9086 ptag = (void *)seg; 9087 paddr = addr; 9088 } 9089 (void) preclaim_callback(ptag, paddr, len, *ppp, rw, 0); 9090 } else { 9091 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 9092 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 9093 len = lpgeaddr - lpgaddr; 9094 npages = btop(len); 9095 seg_pinactive(seg, pamp, paddr, len, 9096 *ppp - adjustpages, rw, pflags, preclaim_callback); 9097 } 9098 9099 if (pamp != NULL) { 9100 ASSERT(svd->type == MAP_SHARED); 9101 ASSERT(svd->softlockcnt >= npages); 9102 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 9103 } 9104 9105 if (sftlck_sbase) { 9106 ASSERT(svd->softlockcnt_sbase > 0); 9107 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase); 9108 } 9109 if (sftlck_send) { 9110 ASSERT(svd->softlockcnt_send > 0); 9111 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send); 9112 } 9113 9114 /* 9115 * If someone is blocked while unmapping, we purge 9116 * segment page cache and thus reclaim pplist synchronously 9117 * without waiting for seg_pasync_thread. This speeds up 9118 * unmapping in cases where munmap(2) is called, while 9119 * raw async i/o is still in progress or where a thread 9120 * exits on data fault in a multithreaded application. 9121 */ 9122 if (AS_ISUNMAPWAIT(seg->s_as)) { 9123 if (svd->softlockcnt == 0) { 9124 mutex_enter(&seg->s_as->a_contents); 9125 if (AS_ISUNMAPWAIT(seg->s_as)) { 9126 AS_CLRUNMAPWAIT(seg->s_as); 9127 cv_broadcast(&seg->s_as->a_cv); 9128 } 9129 mutex_exit(&seg->s_as->a_contents); 9130 } else if (pamp == NULL) { 9131 /* 9132 * softlockcnt is not 0 and this is a 9133 * MAP_PRIVATE segment. 
Try to purge its 9134 * pcache entries to reduce softlockcnt. 9135 * If it drops to 0 segvn_reclaim() 9136 * will wake up a thread waiting on 9137 * unmapwait flag. 9138 * 9139 * We don't purge MAP_SHARED segments with non 9140 * 0 softlockcnt since IO is still in progress 9141 * for such segments. 9142 */ 9143 ASSERT(svd->type == MAP_PRIVATE); 9144 segvn_purge(seg); 9145 } 9146 } 9147 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9148 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 9149 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 9150 return (0); 9151 } 9152 9153 /* The L_PAGELOCK case ... */ 9154 9155 VM_STAT_ADD(segvnvmstats.pagelock[1]); 9156 9157 /* 9158 * For MAP_SHARED segments we have to check protections before 9159 * seg_plookup() since pcache entries may be shared by many segments 9160 * with potentially different page protections. 9161 */ 9162 if (pamp != NULL) { 9163 ASSERT(svd->type == MAP_SHARED); 9164 if (svd->pageprot == 0) { 9165 if ((svd->prot & protchk) == 0) { 9166 error = EACCES; 9167 goto out; 9168 } 9169 } else { 9170 /* 9171 * check page protections 9172 */ 9173 caddr_t ea; 9174 9175 if (seg->s_szc) { 9176 a = lpgaddr; 9177 ea = lpgeaddr; 9178 } else { 9179 a = addr; 9180 ea = addr + len; 9181 } 9182 for (; a < ea; a += pgsz) { 9183 struct vpage *vp; 9184 9185 ASSERT(seg->s_szc == 0 || 9186 sameprot(seg, a, pgsz)); 9187 vp = &svd->vpage[seg_page(seg, a)]; 9188 if ((VPP_PROT(vp) & protchk) == 0) { 9189 error = EACCES; 9190 goto out; 9191 } 9192 } 9193 } 9194 } 9195 9196 /* 9197 * try to find pages in segment page cache 9198 */ 9199 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 9200 if (pplist != NULL) { 9201 if (pamp != NULL) { 9202 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 9203 ASSERT(svd->type == MAP_SHARED); 9204 atomic_add_long((ulong_t *)&svd->softlockcnt, 9205 npages); 9206 } 9207 if (sftlck_sbase) { 9208 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9209 } 9210 if (sftlck_send) { 9211 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9212 } 9213 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9214 *ppp = pplist + adjustpages; 9215 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 9216 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 9217 return (0); 9218 } 9219 9220 /* 9221 * For MAP_SHARED segments we already verified above that segment 9222 * protections allow this pagelock operation. 9223 */ 9224 if (pamp == NULL) { 9225 ASSERT(svd->type == MAP_PRIVATE); 9226 if (svd->pageprot == 0) { 9227 if ((svd->prot & protchk) == 0) { 9228 error = EACCES; 9229 goto out; 9230 } 9231 if (svd->prot & PROT_WRITE) { 9232 wlen = lpgeaddr - lpgaddr; 9233 } else { 9234 wlen = 0; 9235 ASSERT(rw == S_READ); 9236 } 9237 } else { 9238 int wcont = 1; 9239 /* 9240 * check page protections 9241 */ 9242 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 9243 struct vpage *vp; 9244 9245 ASSERT(seg->s_szc == 0 || 9246 sameprot(seg, a, pgsz)); 9247 vp = &svd->vpage[seg_page(seg, a)]; 9248 if ((VPP_PROT(vp) & protchk) == 0) { 9249 error = EACCES; 9250 goto out; 9251 } 9252 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 9253 wlen += pgsz; 9254 } else { 9255 wcont = 0; 9256 ASSERT(rw == S_READ); 9257 } 9258 } 9259 } 9260 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 9261 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9262 } 9263 9264 /* 9265 * Only build large page adjusted shadow list if we expect to insert 9266 * it into pcache. For large enough pages it's a big overhead to 9267 * create a shadow list of the entire large page. 
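 * (Editorial note with assumed sizes: for a 2M large page and a 4K
 * PAGESIZE the shadow list holds 2M / 4K = 512 page_t pointers, i.e.
 * roughly 4K of kmem on a 64-bit kernel, even if the caller only wants a
 * single 4K page locked for its IO.)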
But this overhead 9268 * should be amortized over repeated pcache hits on subsequent reuse 9269 * of this shadow list (IO into any range within this shadow list will 9270 * find it in pcache since we large page align the request for pcache 9271 * lookups). pcache performance is improved with bigger shadow lists 9272 * as it reduces the time to pcache the entire big segment and reduces 9273 * pcache chain length. 9274 */ 9275 if (seg_pinsert_check(seg, pamp, paddr, 9276 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9277 addr = lpgaddr; 9278 len = lpgeaddr - lpgaddr; 9279 use_pcache = 1; 9280 } else { 9281 use_pcache = 0; 9282 /* 9283 * Since this entry will not be inserted into the pcache, we 9284 * will not do any adjustments to the starting address or 9285 * size of the memory to be locked. 9286 */ 9287 adjustpages = 0; 9288 } 9289 npages = btop(len); 9290 9291 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9292 pl = pplist; 9293 *ppp = pplist + adjustpages; 9294 /* 9295 * If use_pcache is 0 this shadow list is not large page adjusted. 9296 * Record this info in the last entry of shadow array so that 9297 * L_PAGEUNLOCK can determine if it should large page adjust the 9298 * address range to find the real range that was locked. 9299 */ 9300 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9301 9302 page = seg_page(seg, addr); 9303 anon_index = svd->anon_index + page; 9304 9305 anlock = 0; 9306 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9307 ASSERT(amp->a_szc >= seg->s_szc); 9308 anpgcnt = page_get_pagecnt(amp->a_szc); 9309 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9310 struct anon *ap; 9311 struct vnode *vp; 9312 u_offset_t off; 9313 9314 /* 9315 * Lock and unlock anon array only once per large page. 9316 * anon_array_enter() locks the root anon slot according to 9317 * a_szc which can't change while anon map is locked. We lock 9318 * anon the first time through this loop and each time we 9319 * reach anon index that corresponds to a root of a large 9320 * page. 9321 */ 9322 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9323 ASSERT(anlock == 0); 9324 anon_array_enter(amp, anon_index, &cookie); 9325 anlock = 1; 9326 } 9327 ap = anon_get_ptr(amp->ahp, anon_index); 9328 9329 /* 9330 * We must never use seg_pcache for COW pages 9331 * because we might end up with original page still 9332 * lying in seg_pcache even after private page is 9333 * created. This leads to data corruption as 9334 * aio_write refers to the page still in cache 9335 * while all other accesses refer to the private 9336 * page.
9337 */ 9338 if (ap == NULL || ap->an_refcnt != 1) { 9339 struct vpage *vpage; 9340 9341 if (seg->s_szc) { 9342 error = EFAULT; 9343 break; 9344 } 9345 if (svd->vpage != NULL) { 9346 vpage = &svd->vpage[seg_page(seg, a)]; 9347 } else { 9348 vpage = NULL; 9349 } 9350 ASSERT(anlock); 9351 anon_array_exit(&cookie); 9352 anlock = 0; 9353 pp = NULL; 9354 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9355 vpage, &pp, 0, F_INVAL, rw, 1); 9356 if (error) { 9357 error = fc_decode(error); 9358 break; 9359 } 9360 anon_array_enter(amp, anon_index, &cookie); 9361 anlock = 1; 9362 ap = anon_get_ptr(amp->ahp, anon_index); 9363 if (ap == NULL || ap->an_refcnt != 1) { 9364 error = EFAULT; 9365 break; 9366 } 9367 } 9368 swap_xlate(ap, &vp, &off); 9369 pp = page_lookup_nowait(vp, off, SE_SHARED); 9370 if (pp == NULL) { 9371 error = EFAULT; 9372 break; 9373 } 9374 if (ap->an_pvp != NULL) { 9375 anon_swap_free(ap, pp); 9376 } 9377 /* 9378 * Unlock anon if this is the last slot in a large page. 9379 */ 9380 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9381 ASSERT(anlock); 9382 anon_array_exit(&cookie); 9383 anlock = 0; 9384 } 9385 *pplist++ = pp; 9386 } 9387 if (anlock) { /* Ensure the lock is dropped */ 9388 anon_array_exit(&cookie); 9389 } 9390 ANON_LOCK_EXIT(&->a_rwlock); 9391 9392 if (a >= addr + len) { 9393 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9394 if (pamp != NULL) { 9395 ASSERT(svd->type == MAP_SHARED); 9396 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9397 npages); 9398 wlen = len; 9399 } 9400 if (sftlck_sbase) { 9401 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9402 } 9403 if (sftlck_send) { 9404 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9405 } 9406 if (use_pcache) { 9407 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9408 rw, pflags, preclaim_callback); 9409 } 9410 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9411 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9412 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9413 return (0); 9414 } 9415 9416 pplist = pl; 9417 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9418 while (np > (uint_t)0) { 9419 ASSERT(PAGE_LOCKED(*pplist)); 9420 page_unlock(*pplist); 9421 np--; 9422 pplist++; 9423 } 9424 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9425 out: 9426 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9427 *ppp = NULL; 9428 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9429 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9430 return (error); 9431 } 9432 9433 /* 9434 * purge any cached pages in the I/O page cache 9435 */ 9436 static void 9437 segvn_purge(struct seg *seg) 9438 { 9439 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9440 9441 /* 9442 * pcache is only used by pure anon segments. 9443 */ 9444 if (svd->amp == NULL || svd->vp != NULL) { 9445 return; 9446 } 9447 9448 /* 9449 * For MAP_SHARED segments non 0 segment's softlockcnt means 9450 * active IO is still in progress via this segment. So we only 9451 * purge MAP_SHARED segments when their softlockcnt is 0. 9452 */ 9453 if (svd->type == MAP_PRIVATE) { 9454 if (svd->softlockcnt) { 9455 seg_ppurge(seg, NULL, 0); 9456 } 9457 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9458 seg_ppurge(seg, svd->amp, 0); 9459 } 9460 } 9461 9462 /* 9463 * If async argument is not 0 we are called from pcache async thread and don't 9464 * hold AS lock. 
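 * (Editorial note, inferred from segvn_pagelock() above rather than
 * normative: the synchronous, non-pcache invocation there has the shape
 *
 *	(void) segvn_reclaim((void *)seg, addr, len, pplist, rw, 0);
 *
 * while the async != 0 case is the pcache async thread calling back with
 * the ptag/paddr/len that were originally passed to seg_pinsert().)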
9465 */ 9466 9467 /*ARGSUSED*/ 9468 static int 9469 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9470 enum seg_rw rw, int async) 9471 { 9472 struct seg *seg = (struct seg *)ptag; 9473 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9474 pgcnt_t np, npages; 9475 struct page **pl; 9476 9477 npages = np = btop(len); 9478 ASSERT(npages); 9479 9480 ASSERT(svd->vp == NULL && svd->amp != NULL); 9481 ASSERT(svd->softlockcnt >= npages); 9482 ASSERT(async || AS_LOCK_HELD(seg->s_as)); 9483 9484 pl = pplist; 9485 9486 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9487 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9488 9489 while (np > (uint_t)0) { 9490 if (rw == S_WRITE) { 9491 hat_setrefmod(*pplist); 9492 } else { 9493 hat_setref(*pplist); 9494 } 9495 page_unlock(*pplist); 9496 np--; 9497 pplist++; 9498 } 9499 9500 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9501 9502 /* 9503 * If we are pcache async thread we don't hold AS lock. This means if 9504 * softlockcnt drops to 0 after the decrement below address space may 9505 * get freed. We can't allow it since after softlock derement to 0 we 9506 * still need to access as structure for possible wakeup of unmap 9507 * waiters. To prevent the disappearance of as we take this segment 9508 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to 9509 * make sure this routine completes before segment is freed. 9510 * 9511 * The second complication we have to deal with in async case is a 9512 * possibility of missed wake up of unmap wait thread. When we don't 9513 * hold as lock here we may take a_contents lock before unmap wait 9514 * thread that was first to see softlockcnt was still not 0. As a 9515 * result we'll fail to wake up an unmap wait thread. To avoid this 9516 * race we set nounmapwait flag in as structure if we drop softlockcnt 9517 * to 0 when we were called by pcache async thread. unmapwait thread 9518 * will not block if this flag is set. 9519 */ 9520 if (async) { 9521 mutex_enter(&svd->segfree_syncmtx); 9522 } 9523 9524 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) { 9525 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 9526 mutex_enter(&seg->s_as->a_contents); 9527 if (async) { 9528 AS_SETNOUNMAPWAIT(seg->s_as); 9529 } 9530 if (AS_ISUNMAPWAIT(seg->s_as)) { 9531 AS_CLRUNMAPWAIT(seg->s_as); 9532 cv_broadcast(&seg->s_as->a_cv); 9533 } 9534 mutex_exit(&seg->s_as->a_contents); 9535 } 9536 } 9537 9538 if (async) { 9539 mutex_exit(&svd->segfree_syncmtx); 9540 } 9541 return (0); 9542 } 9543 9544 /*ARGSUSED*/ 9545 static int 9546 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9547 enum seg_rw rw, int async) 9548 { 9549 amp_t *amp = (amp_t *)ptag; 9550 pgcnt_t np, npages; 9551 struct page **pl; 9552 9553 npages = np = btop(len); 9554 ASSERT(npages); 9555 ASSERT(amp->a_softlockcnt >= npages); 9556 9557 pl = pplist; 9558 9559 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9560 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9561 9562 while (np > (uint_t)0) { 9563 if (rw == S_WRITE) { 9564 hat_setrefmod(*pplist); 9565 } else { 9566 hat_setref(*pplist); 9567 } 9568 page_unlock(*pplist); 9569 np--; 9570 pplist++; 9571 } 9572 9573 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9574 9575 /* 9576 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt 9577 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0 9578 * and anonmap_purge() acquires a_purgemtx. 
*/ 9580 mutex_enter(&amp->a_purgemtx); 9581 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) && 9582 amp->a_purgewait) { 9583 amp->a_purgewait = 0; 9584 cv_broadcast(&amp->a_purgecv); 9585 } 9586 mutex_exit(&amp->a_purgemtx); 9587 return (0); 9588 } 9589 9590 /* 9591 * get a memory ID for an addr in a given segment 9592 * 9593 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9594 * At fault time they will be relocated into larger pages. 9595 */ 9596 static int 9597 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9598 { 9599 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9600 struct anon *ap = NULL; 9601 ulong_t anon_index = 0; 9602 struct anon_map *amp; 9603 anon_sync_obj_t cookie; 9604 9605 if (svd->type == MAP_PRIVATE) { 9606 memidp->val[0] = (uintptr_t)seg->s_as; 9607 memidp->val[1] = (uintptr_t)addr; 9608 return (0); 9609 } 9610 9611 if (svd->type == MAP_SHARED) { 9612 if (svd->vp) { 9613 memidp->val[0] = (uintptr_t)svd->vp; 9614 memidp->val[1] = (u_longlong_t)svd->offset + 9615 (uintptr_t)(addr - seg->s_base); 9616 return (0); 9617 } else { 9618 9619 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9620 if ((amp = svd->amp) != NULL) { 9621 anon_index = svd->anon_index + 9622 seg_page(seg, addr); 9623 } 9624 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9625 9626 ASSERT(amp != NULL); 9627 9628 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9629 anon_array_enter(amp, anon_index, &cookie); 9630 ap = anon_get_ptr(amp->ahp, anon_index); 9631 if (ap == NULL) { 9632 page_t *pp; 9633 9634 pp = anon_zero(seg, addr, &ap, svd->cred); 9635 if (pp == NULL) { 9636 anon_array_exit(&cookie); 9637 ANON_LOCK_EXIT(&amp->a_rwlock); 9638 return (ENOMEM); 9639 } 9640 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9641 == NULL); 9642 (void) anon_set_ptr(amp->ahp, anon_index, 9643 ap, ANON_SLEEP); 9644 page_unlock(pp); 9645 } 9646 9647 anon_array_exit(&cookie); 9648 ANON_LOCK_EXIT(&amp->a_rwlock); 9649 9650 memidp->val[0] = (uintptr_t)ap; 9651 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9652 return (0); 9653 } 9654 } 9655 return (EINVAL); 9656 } 9657 9658 static int 9659 sameprot(struct seg *seg, caddr_t a, size_t len) 9660 { 9661 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9662 struct vpage *vpage; 9663 spgcnt_t pages = btop(len); 9664 uint_t prot; 9665 9666 if (svd->pageprot == 0) 9667 return (1); 9668 9669 ASSERT(svd->vpage != NULL); 9670 9671 vpage = &svd->vpage[seg_page(seg, a)]; 9672 prot = VPP_PROT(vpage); 9673 vpage++; 9674 pages--; 9675 while (pages-- > 0) { 9676 if (prot != VPP_PROT(vpage)) 9677 return (0); 9678 vpage++; 9679 } 9680 return (1); 9681 } 9682 9683 /* 9684 * Get memory allocation policy info for specified address in given segment 9685 */ 9686 static lgrp_mem_policy_info_t * 9687 segvn_getpolicy(struct seg *seg, caddr_t addr) 9688 { 9689 struct anon_map *amp; 9690 ulong_t anon_index; 9691 lgrp_mem_policy_info_t *policy_info; 9692 struct segvn_data *svn_data; 9693 u_offset_t vn_off; 9694 vnode_t *vp; 9695 9696 ASSERT(seg != NULL); 9697 9698 svn_data = (struct segvn_data *)seg->s_data; 9699 if (svn_data == NULL) 9700 return (NULL); 9701 9702 /* 9703 * Get policy info for private or shared memory 9704 */ 9705 if (svn_data->type != MAP_SHARED) { 9706 if (svn_data->tr_state != SEGVN_TR_ON) { 9707 policy_info = &svn_data->policy_info; 9708 } else { 9709 policy_info = &svn_data->tr_policy_info; 9710 ASSERT(policy_info->mem_policy == 9711 LGRP_MEM_POLICY_NEXT_SEG); 9712 } 9713 } else { 9714 amp = svn_data->amp; 9715 anon_index =
svn_data->anon_index + seg_page(seg, addr); 9716 vp = svn_data->vp; 9717 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9718 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9719 } 9720 9721 return (policy_info); 9722 } 9723 9724 /*ARGSUSED*/ 9725 static int 9726 segvn_capable(struct seg *seg, segcapability_t capability) 9727 { 9728 return (0); 9729 } 9730 9731 /* 9732 * Bind text vnode segment to an amp. If we bind successfully mappings will be 9733 * established to per vnode mapping per lgroup amp pages instead of to vnode 9734 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9735 * may share the same text replication amp. If a suitable amp doesn't already 9736 * exist in svntr hash table create a new one. We may fail to bind to amp if 9737 * segment is not eligible for text replication. Code below first checks for 9738 * these conditions. If binding is successful segment tr_state is set to on 9739 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9740 * svd->amp remains as NULL. 9741 */ 9742 static void 9743 segvn_textrepl(struct seg *seg) 9744 { 9745 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9746 vnode_t *vp = svd->vp; 9747 u_offset_t off = svd->offset; 9748 size_t size = seg->s_size; 9749 u_offset_t eoff = off + size; 9750 uint_t szc = seg->s_szc; 9751 ulong_t hash = SVNTR_HASH_FUNC(vp); 9752 svntr_t *svntrp; 9753 struct vattr va; 9754 proc_t *p = seg->s_as->a_proc; 9755 lgrp_id_t lgrp_id; 9756 lgrp_id_t olid; 9757 int first; 9758 struct anon_map *amp; 9759 9760 ASSERT(AS_LOCK_HELD(seg->s_as)); 9761 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9762 ASSERT(p != NULL); 9763 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9764 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9765 ASSERT(svd->flags & MAP_TEXT); 9766 ASSERT(svd->type == MAP_PRIVATE); 9767 ASSERT(vp != NULL && svd->amp == NULL); 9768 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9769 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9770 ASSERT(seg->s_as != &kas); 9771 ASSERT(off < eoff); 9772 ASSERT(svntr_hashtab != NULL); 9773 9774 /* 9775 * If numa optimizations are no longer desired bail out. 9776 */ 9777 if (!lgrp_optimizations()) { 9778 svd->tr_state = SEGVN_TR_OFF; 9779 return; 9780 } 9781 9782 /* 9783 * Avoid creating anon maps with size bigger than the file size. 9784 * If VOP_GETATTR() call fails bail out. 9785 */ 9786 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9787 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9788 svd->tr_state = SEGVN_TR_OFF; 9789 SEGVN_TR_ADDSTAT(gaerr); 9790 return; 9791 } 9792 if (btopr(va.va_size) < btopr(eoff)) { 9793 svd->tr_state = SEGVN_TR_OFF; 9794 SEGVN_TR_ADDSTAT(overmap); 9795 return; 9796 } 9797 9798 /* 9799 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9800 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9801 * mapping that checks if trcache for this vnode needs to be 9802 * invalidated can't miss us. 9803 */ 9804 if (!(vp->v_flag & VVMEXEC)) { 9805 mutex_enter(&vp->v_lock); 9806 vp->v_flag |= VVMEXEC; 9807 mutex_exit(&vp->v_lock); 9808 } 9809 mutex_enter(&svntr_hashtab[hash].tr_lock); 9810 /* 9811 * Bail out if potentially MAP_SHARED writable mappings exist to this 9812 * vnode. We don't want to use old file contents from existing 9813 * replicas if this mapping was established after the original file 9814 * was changed. 
9815 */ 9816 if (vn_is_mapped(vp, V_WRITE)) { 9817 mutex_exit(&svntr_hashtab[hash].tr_lock); 9818 svd->tr_state = SEGVN_TR_OFF; 9819 SEGVN_TR_ADDSTAT(wrcnt); 9820 return; 9821 } 9822 svntrp = svntr_hashtab[hash].tr_head; 9823 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9824 ASSERT(svntrp->tr_refcnt != 0); 9825 if (svntrp->tr_vp != vp) { 9826 continue; 9827 } 9828 9829 /* 9830 * Bail out if the file or its attributes were changed after 9831 * this replication entry was created since we need to use the 9832 * latest file contents. Note that mtime test alone is not 9833 * sufficient because a user can explicitly change mtime via 9834 * utimes(2) interfaces back to the old value after modifiying 9835 * the file contents. To detect this case we also have to test 9836 * ctime which among other things records the time of the last 9837 * mtime change by utimes(2). ctime is not changed when the file 9838 * is only read or executed so we expect that typically existing 9839 * replication amp's can be used most of the time. 9840 */ 9841 if (!svntrp->tr_valid || 9842 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9843 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9844 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9845 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9846 mutex_exit(&svntr_hashtab[hash].tr_lock); 9847 svd->tr_state = SEGVN_TR_OFF; 9848 SEGVN_TR_ADDSTAT(stale); 9849 return; 9850 } 9851 /* 9852 * if off, eoff and szc match current segment we found the 9853 * existing entry we can use. 9854 */ 9855 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9856 svntrp->tr_szc == szc) { 9857 break; 9858 } 9859 /* 9860 * Don't create different but overlapping in file offsets 9861 * entries to avoid replication of the same file pages more 9862 * than once per lgroup. 9863 */ 9864 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9865 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9866 mutex_exit(&svntr_hashtab[hash].tr_lock); 9867 svd->tr_state = SEGVN_TR_OFF; 9868 SEGVN_TR_ADDSTAT(overlap); 9869 return; 9870 } 9871 } 9872 /* 9873 * If we didn't find existing entry create a new one. 9874 */ 9875 if (svntrp == NULL) { 9876 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9877 if (svntrp == NULL) { 9878 mutex_exit(&svntr_hashtab[hash].tr_lock); 9879 svd->tr_state = SEGVN_TR_OFF; 9880 SEGVN_TR_ADDSTAT(nokmem); 9881 return; 9882 } 9883 #ifdef DEBUG 9884 { 9885 lgrp_id_t i; 9886 for (i = 0; i < NLGRPS_MAX; i++) { 9887 ASSERT(svntrp->tr_amp[i] == NULL); 9888 } 9889 } 9890 #endif /* DEBUG */ 9891 svntrp->tr_vp = vp; 9892 svntrp->tr_off = off; 9893 svntrp->tr_eoff = eoff; 9894 svntrp->tr_szc = szc; 9895 svntrp->tr_valid = 1; 9896 svntrp->tr_mtime = va.va_mtime; 9897 svntrp->tr_ctime = va.va_ctime; 9898 svntrp->tr_refcnt = 0; 9899 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9900 svntr_hashtab[hash].tr_head = svntrp; 9901 } 9902 first = 1; 9903 again: 9904 /* 9905 * We want to pick a replica with pages on main thread's (t_tid = 1, 9906 * aka T1) lgrp. Currently text replication is only optimized for 9907 * workloads that either have all threads of a process on the same 9908 * lgrp or execute their large text primarily on main thread. 9909 */ 9910 lgrp_id = p->p_t1_lgrpid; 9911 if (lgrp_id == LGRP_NONE) { 9912 /* 9913 * In case exec() prefaults text on non main thread use 9914 * current thread lgrpid. It will become main thread anyway 9915 * soon. 9916 */ 9917 lgrp_id = lgrp_home_id(curthread); 9918 } 9919 /* 9920 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. 
	first = 1;
again:
	/*
	 * We want to pick a replica with pages on the main thread's
	 * (t_tid = 1, aka T1) lgrp. Currently text replication is only
	 * optimized for workloads that either have all threads of a process
	 * on the same lgrp or execute their large text primarily on the main
	 * thread.
	 */
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		/*
		 * In case exec() prefaults text on a non-main thread, use the
		 * current thread's lgrpid. It will become the main thread
		 * soon anyway.
		 */
		lgrp_id = lgrp_home_id(curthread);
	}
	/*
	 * Set p_tr_lgrpid to lgrp_id if it hasn't been set yet. Otherwise
	 * just set it to NLGRPS_MAX if it's different from the current
	 * process T1 home lgrp. p_tr_lgrpid is used to detect if the process
	 * uses text replication and T1's new home is different from the lgrp
	 * used for text replication. When this happens the asynchronous segvn
	 * thread rechecks if segments should change the lgrps used for text
	 * replication. If we fail to set p_tr_lgrpid with atomic_cas_32 then
	 * set it to NLGRPS_MAX without cas if it's not already NLGRPS_MAX and
	 * not equal to the lgrp_id we want to use. We don't need to use cas
	 * in this case because another thread that races in between our
	 * non-atomic check and set may only change p_tr_lgrpid to NLGRPS_MAX
	 * at this point.
	 */
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	olid = p->p_tr_lgrpid;
	if (lgrp_id != olid && olid != NLGRPS_MAX) {
		lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
		if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
		    olid) {
			olid = p->p_tr_lgrpid;
			ASSERT(olid != LGRP_NONE);
			if (olid != lgrp_id && olid != NLGRPS_MAX) {
				p->p_tr_lgrpid = NLGRPS_MAX;
			}
		}
		ASSERT(p->p_tr_lgrpid != LGRP_NONE);
		membar_producer();
		/*
		 * lgrp_move_thread() won't schedule an async recheck after
		 * the p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
		 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
		 * is not LGRP_NONE.
		 */
		if (first && p->p_t1_lgrpid != LGRP_NONE &&
		    p->p_t1_lgrpid != lgrp_id) {
			first = 0;
			goto again;
		}
	}
	/*
	 * If no amp was created yet for lgrp_id, create a new one as long as
	 * we have enough memory to afford it.
	 */
	if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_TR_ADDSTAT(normem);
			goto fail;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_TR_ADDSTAT(noanon);
			goto fail;
		}
		amp = anonmap_alloc(size, size, ANON_NOSLEEP);
		if (amp == NULL) {
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			goto fail;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = szc;
		svntrp->tr_amp[lgrp_id] = amp;
		SEGVN_TR_ADDSTAT(newamp);
	}
	svntrp->tr_refcnt++;
	ASSERT(svd->svn_trnext == NULL);
	ASSERT(svd->svn_trprev == NULL);
	svd->svn_trnext = svntrp->tr_svnhead;
	svd->svn_trprev = NULL;
	if (svntrp->tr_svnhead != NULL) {
		svntrp->tr_svnhead->svn_trprev = svd;
	}
	svntrp->tr_svnhead = svd;
	ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
	ASSERT(amp->refcnt >= 1);
	svd->amp = amp;
	svd->anon_index = 0;
	svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->tr_state = SEGVN_TR_ON;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	SEGVN_TR_ADDSTAT(repl);
	return;
fail:
	ASSERT(segvn_textrepl_bytes >= size);
	atomic_add_long(&segvn_textrepl_bytes, -size);
	ASSERT(svntrp != NULL);
	ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
	if (svntrp->tr_refcnt == 0) {
		ASSERT(svntrp == svntr_hashtab[hash].tr_head);
		svntr_hashtab[hash].tr_head = svntrp->tr_next;
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		kmem_cache_free(svntr_cache, svntrp);
	} else {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
	svd->tr_state = SEGVN_TR_OFF;
}
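
#if 0
/*
 * Illustrative userland sketch (never compiled here) of why the staleness
 * test above compares ctime in addition to mtime: after a file is rewritten,
 * utimes(2) can restore the old mtime, but the utimes(2) call itself advances
 * ctime, so the combined mtime+ctime comparison still detects the change.
 * This is only a demonstration with an assumed, hypothetical helper; it is
 * not part of the segvn driver.
 */
#include <sys/stat.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>

static int
rewrite_and_restore_mtime(const char *path)
{
	struct stat before, after;
	struct timeval tv[2];
	int fd;

	if (stat(path, &before) != 0)
		return (-1);

	/* Rewrite the file contents; this bumps both mtime and ctime. */
	if ((fd = open(path, O_WRONLY | O_APPEND)) == -1)
		return (-1);
	(void) write(fd, "x", 1);
	(void) close(fd);

	/* Put atime/mtime back to their old values; ctime advances again. */
	tv[0].tv_sec = before.st_atime;
	tv[0].tv_usec = 0;
	tv[1].tv_sec = before.st_mtime;
	tv[1].tv_usec = 0;
	if (utimes(path, tv) != 0)
		return (-1);

	if (stat(path, &after) != 0)
		return (-1);

	/*
	 * At this point after.st_mtime equals before.st_mtime, yet
	 * after.st_ctime differs, which is exactly what the svntr staleness
	 * test relies on.
	 */
	return (after.st_ctime != before.st_ctime ? 0 : -1);
}
#endif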

/*
 * Convert the seg back to a regular vnode mapping seg by unbinding it from
 * its text replication amp. This routine is most typically called when the
 * segment is unmapped, but it can also be called when the segment no longer
 * qualifies for text replication (e.g. due to protection changes). If
 * unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback(). If we are the last user of the svntr, free all its
 * anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	u_offset_t off = svd->offset;
	size_t size = seg->s_size;
	u_offset_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;
	svntr_t **prv_svntrp;
	lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
	lgrp_id_t i;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
		amp->refcnt = 0;
		anonmap_free(amp);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}
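
/*
 * Lifetime note: an svntr_t entry is created by the first segment that turns
 * on text replication for a given <vp, off, eoff, szc> range, is shared by
 * every later segment mapping the same range, and is only destroyed above
 * once the last such segment detaches and tr_refcnt drops to zero. The
 * per-lgroup anon maps live as long as the entry does, which is what allows
 * existing replicas to be reused by later mappings of the same text.
 */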

/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (VVMEXEC flag is set). In this case we
 * need to prevent further use of existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}

static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);

	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}
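
/*
 * Snapshot of the lgroup thread-migration count seen by the last timeout.
 * segvn_trupdate_wakeup() below posts segvn_trasync_sem (waking the async
 * thread above) only when this count has changed, so replica placement is
 * only re-evaluated after threads relevant to text replication appear to
 * have changed home lgroups.
 */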
static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}

static void
segvn_trupdate(void)
{
	ulong_t hash;
	svntr_t *svntrp;
	segvn_data_t *svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}

static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we are taking the as/seg and svntr hash
	 * locks in reverse order from the synchronous thread.
	 */
	if (!AS_LOCK_TRYENTER(as, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't strictly need to drop the bucket lock, but we do it here
	 * to give other threads a chance. svntr and svd can't be unlinked as
	 * long as the segment lock is held as a writer and the AS lock is
	 * held as well. After we retake the bucket lock we'll continue from
	 * where we left off; we'll be able to reach the end of either list
	 * since new entries are always added to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}
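
#if 0
/*
 * Illustrative sketch only (never compiled): the deadlock-avoidance rule
 * segvn_trupdate_seg() follows. The synchronous path takes the AS and
 * segment locks before the svntr bucket lock, while the async thread is
 * already holding the bucket lock when it gets here, so it may only *try*
 * for the AS/segment locks and must back off (counting a "nolock" stat)
 * when either attempt fails. A hypothetical helper making that rule
 * explicit might look like this:
 */
static int
segvn_tr_trylock_seg(struct as *as, struct segvn_data *svd)
{
	if (!AS_LOCK_TRYENTER(as, RW_READER))
		return (0);
	if (!SEGVN_LOCK_TRYENTER(as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as);
		return (0);
	}
	return (1);		/* caller must later drop both locks */
}
#endif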