/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/debug.h>
#include <sys/cred.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/swap.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/vm.h>
#include <sys/dumphdr.h>
#include <sys/lgrp.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/zone.h>
#include <sys/shm_impl.h>

/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
 * it can.  In the rare case when this page list is not large enough, it
 * goes and gets a large enough array from kmem.
 *
 * This small page list array covers either 8 pages or 64kB worth of pages -
 * whichever is smaller.
 */
#define	PVN_MAX_GETPAGE_SZ	0x10000
#define	PVN_MAX_GETPAGE_NUM	0x8

#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
#define	PVN_GETPAGE_SZ	ptob(PVN_MAX_GETPAGE_NUM)
#define	PVN_GETPAGE_NUM	PVN_MAX_GETPAGE_NUM
#else
#define	PVN_GETPAGE_SZ	PVN_MAX_GETPAGE_SZ
#define	PVN_GETPAGE_NUM	btop(PVN_MAX_GETPAGE_SZ)
#endif

/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t	segvn_swapout(struct seg *seg);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_capable(struct seg *seg, segcapability_t capable);

struct seg_ops segvn_ops = {
	segvn_dup,
	segvn_unmap,
	segvn_free,
	segvn_fault,
	segvn_faulta,
	segvn_setprot,
	segvn_checkprot,
	segvn_kluster,
	segvn_swapout,
	segvn_sync,
	segvn_incore,
	segvn_lockop,
	segvn_getprot,
	segvn_getoffset,
	segvn_gettype,
	segvn_getvp,
	segvn_advise,
	segvn_dump,
	segvn_pagelock,
	segvn_setpagesize,
	segvn_getmemid,
	segvn_getpolicy,
	segvn_capable,
};

/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */

#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;

static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;

#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t pagelock[2];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */

#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */

#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) { 						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}

/*ARGSUSED*/
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

/*ARGSUSED1*/
static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

/*ARGSUSED*/
static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}

/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that is disabled is those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;

int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;

/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replicas are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amps are looked up in svntr_hashtab hash table
 * with vp,off,size,szc used as a key. Text replication segments are read only
 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
 * pages. Replication amp is assigned to a segment when it gets its first
 * pagefault. To handle main thread lgroup rehoming, segvn_trasync_thread
 * periodically rechecks if the process still maps an amp local to the main
 * thread. If not, the async thread forces the process to remap to an amp in
 * the new home lgroup of the main thread. The current text replication
 * implementation only benefits workloads that do most of their work in the
 * main thread of a process or whose threads all run in the same lgroup. To
 * extend the text replication benefit to other types of multithreaded
 * workloads, further work would be needed in the hat layer to allow the same
 * virtual address in the same hat to simultaneously map different physical
 * addresses (i.e. page table replication would be needed for x86).
 *
 * amp pages are used instead of vnode pages as long as the segment has a very
 * simple life cycle. It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
 * happens, such as a protection change, a real COW fault, a pagesize change,
 * an MC_LOCK request or a partial unmap, we turn off text replication by
 * converting the segment back to a vnode-only segment (unmap the segment's
 * address range and set svd->amp to NULL).
 *
 * The original file can be changed after amp is inserted into
 * svntr_hashtab. Processes that are launched after the file is already
 * changed can't use the replicas created prior to the file change. To
 * implement this functionality hash entries are timestamped. Replicas can
 * only be used if the current file modification time is the same as the
 * timestamp saved when the hash entry was created. However, timestamps alone
 * are not sufficient to detect file modification via mmap(MAP_SHARED)
 * mappings. We deal with file changes via MAP_SHARED mappings differently:
 * when writable MAP_SHARED mappings are created to vnodes marked as
 * executable, we mark all existing replicas for this vnode as not usable for
 * future text mappings, and we don't create new replicas for files that
 * currently have potentially writable MAP_SHARED mappings (i.e.
 * vn_is_mapped(V_WRITE) is true).
 */
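/*
 * Summary of the tunables below (descriptive only, no additional mechanism):
 * text replication stays off unless segvn_init() enables it, which requires
 * an _LP64 kernel with lgrp_optimizations(), segvn_disable_textrepl patched
 * to 0 and textrepl_size_thresh patched to something other than its
 * (size_t)-1 default. Once enabled, a MAP_TEXT|MAP_PRIVATE vnode mapping is
 * considered for replication when it is larger than textrepl_size_thresh or
 * was mapped with _MAP_TEXTREPL (see the trok computation in segvn_create()).
 */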

#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)
size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t		svntr_hashtab_sz = 512;
static svntr_bucket_t	*svntr_hashtab = NULL;
static struct kmem_cache *svntr_cache;
static svntr_stats_t	*segvn_textrepl_stats;
static ksema_t 		segvn_trasync_sem;

int			segvn_disable_textrepl = 1;
size_t			textrepl_size_thresh = (size_t)-1;
size_t			segvn_textrepl_bytes = 0;
size_t			segvn_textrepl_max_bytes = 0;
clock_t			segvn_update_textrepl_interval = 0;
int			segvn_update_tr_time = 10;
int			segvn_disable_textrepl_update = 0;

static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);

/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
			/*NOTREACHED*/
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
				/*NOTREACHED*/
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char	str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}


	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions on x86.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

#if defined(_LP64)
	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}
#endif

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}

#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}

int
segvn_create(struct seg *seg, void *argsp)
{
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
		/*NOTREACHED*/
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
		/*NOTREACHED*/
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
	    segvn_use_regions) {
		use_rgn = 1;
	}

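	/*
	 * use_rgn selects the shared HAT region path: read/execute-only
	 * private MAP_TEXT mappings of a vnode can share HAT resources
	 * via hat_join_region(..., HAT_REGION_TEXT) further below, when
	 * the platform supports HAT_SHARED_REGIONS (see segvn_use_regions
	 * and the hat_supported() check in segvn_init()).
	 */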
	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (a->szc != 0) {
		if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
		    (a->amp != NULL && a->type == MAP_PRIVATE) ||
		    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
			a->szc = 0;
		} else {
			if (a->szc > segvn_maxpgszc)
				a->szc = segvn_maxpgszc;
			pgsz = page_get_pagesize(a->szc);
			if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
				a->szc = 0;
			} else if (a->vp != NULL) {
				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
					/*
					 * paranoid check.
					 * hat_page_demote() is not supported
					 * on swapfs pages.
					 */
					a->szc = 0;
				} else if (map_addr_vacalign_check(seg->s_base,
				    a->offset & PAGEMASK)) {
					a->szc = 0;
				}
			} else if (a->amp != NULL) {
				pgcnt_t anum = btopr(a->offset);
				pgcnt_t pgcnt = page_get_pagecnt(a->szc);
				if (!IS_P2ALIGNED(anum, pgcnt)) {
					a->szc = 0;
				}
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, swresv, 1);
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	if (a->cred) {
		cred = a->cred;
		crhold(cred);
	} else {
		crhold(cred = CRED());
	}

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
				TRACE_3(TR_FAC_VM, TR_ANON_PROC,
				    "anon proc:%p %lu %u", seg, swresv, 0);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
		/*
		 * svntr_hashtab will be NULL if we support shared regions.
		 */
		trok = ((a->flags & MAP_TEXT) &&
		    (seg->s_size > textrepl_size_thresh ||
		    (a->flags & _MAP_TEXTREPL)) &&
		    lgrp_optimizations() && svntr_hashtab != NULL &&
		    a->type == MAP_PRIVATE && swresv == 0 &&
		    !(a->flags & MAP_NORESERVE) &&
		    seg->s_as != &kas && a->vp->v_type == VREG);

		ASSERT(!trok || !use_rgn);
	}

	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;
		extern lgrp_mem_policy_t lgrp_mem_default_policy;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL &&
				    nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}

	if (a->vp != NULL) {
		VN_HOLD(a->vp);
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	seg->s_szc = a->szc;

	svd->seg = seg;
	svd->vp = a->vp;
	/*
	 * Anonymous mappings have no backing file so the offset is meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->pageprot = 0;
	svd->type = a->type;
	svd->vpage = NULL;
	svd->cred = cred;
	svd->advice = MADV_NORMAL;
	svd->pageadvice = 0;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	svd->pageswap = 0;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			svd->swresv = 0;
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet, allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		pgcnt_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will ensure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
			/*NOTREACHED*/
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
			svd->swresv = 0;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
					/*NOTREACHED*/
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(!trok);
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}

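/*
 * Illustrative note (not code used here): segvn_create() is normally reached
 * through as_map(); e.g. a zero-fill mapping is typically created with
 * as_map(as, addr, len, segvn_create, zfod_argsp), where argsp points at one
 * of the segvn_crargs templates defined near the top of this file.
 */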
/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try and concat segments with anon maps
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));

	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If merging shared anon segments just decrement anon map's
	 * refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			namp = amp1;
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			if (namp == NULL) {
				ASSERT(amp1 == NULL);
				namp = amp2;
				anon_release(amp2->ahp, btop(amp2->size));
			} else {
				amp2->refcnt--;
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				anonmap_free(amp2);
			}
			svd2->amp = NULL; /* needed for seg_free */
		}
		namp->ahp = nahp;
		namp->size = asize;
		svd1->amp = namp;
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}
	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			svd2->vpage = NULL;
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;  /* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	return (0);
}

/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(struct seg *seg1, struct seg *seg2, struct segvn_crargs *a,
    size_t swresv)
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);
		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}

/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(
	struct seg *seg1,
	struct seg *seg2,
	struct segvn_crargs *a,
	size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);
	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}

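/*
 * Duplicate this segment into a child's new address space during fork().
 * The parent's address space is write-locked (see the ASSERTs below) and
 * all other threads in the parent are stopped while the dup proceeds.
 */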
static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	uint_t prot;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated. This semantic prevents the child or
	 * parent from dying during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);

		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 */

			/*
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.  This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it.  The
			 * softlock lock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 */
		retry:
			if (svd->softlockcnt) {
				struct anon *ap, *newap;
				size_t i;
				uint_t vpprot;
				page_t *anon_pl[1+1], *pp;
				caddr_t addr;
				ulong_t old_idx = svd->anon_index;
				ulong_t new_idx = 0;

				/*
				 * The softlock count might be non zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim. Flush the cache
				 * now. This should drop the count to zero.
				 * [or there is really I/O going on to these
				 * pages]. Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
					goto retry;
				}
				i = btopr(seg->s_size);
				addr = seg->s_base;
				/*
				 * XXX break cow sharing using PAGESIZE
				 * pages. They will be relocated into larger
				 * pages at fault time.
				 */
				while (i-- > 0) {
					if (ap = anon_get_ptr(amp->ahp,
					    old_idx)) {
						error = anon_getpage(&ap,
						    &vpprot, anon_pl, PAGESIZE,
						    seg, addr, S_READ,
						    svd->cred);
						if (error) {
							newsvd->vpage = NULL;
							goto out;
						}
						/*
						 * prot need not be computed
						 * below 'cause anon_private is
						 * going to ignore it anyway
						 * as child doesn't inherit
						 * pagelock from parent.
						 */
						prot = svd->pageprot ?
						    VPP_PROT(
						    &svd->vpage[
						    seg_page(seg, addr)])
						    : svd->prot;
						pp = anon_private(&newap,
						    newseg, addr, prot,
						    anon_pl[0], 0,
						    newsvd->cred);
						if (pp == NULL) {
							/* no mem abort */
							newsvd->vpage = NULL;
							error = ENOMEM;
							goto out;
						}
						(void) anon_set_ptr(
						    newsvd->amp->ahp, new_idx,
						    newap, ANON_SLEEP);
						page_unlock(pp);
					}
					addr += PAGESIZE;
					old_idx++;
					new_idx++;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}


/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, u_offset_t r_objoff)
{
	u_offset_t off;
	size_t len;
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);
	ASSERT(vp != NULL);

	if (!free_pages) {
		return;
	}

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(vp, off, len);
}

/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg		*seg = cb->hcb_data;
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;
	size_t			len;
	u_offset_t		off;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(svd->vp, svd->offset + off, len);
}

/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present. It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}

static int
segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct seg *nseg;
	struct anon_map *amp;
	pgcnt_t	opages;		/* old segment size in pages */
	pgcnt_t	npages;		/* new segment size in pages */
	pgcnt_t	dpages;		/* pages being deleted (unmapped) */
	hat_callback_t callback;	/* used for free_vp_pages() */
	hat_callback_t *cbp = NULL;
	caddr_t nbase;
	size_t nsize;
	size_t oswresv;
	int reclaim = 1;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Fail the unmap if pages are SOFTLOCKed through this mapping.
	 * softlockcnt is protected from change by the as write lock.
	 */
retry:
	if (svd->softlockcnt > 0) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);

		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			return (EAGAIN);
		}

		/*
		 * since we do have the writers lock nobody can fill
		 * the cache during the purge. The flush either succeeds
		 * or we still have pending I/Os.
		 */
		if (reclaim == 1) {
			segvn_purge(seg);
			reclaim = 0;
			goto retry;
		}
		return (EAGAIN);
	}

	/*
	 * Check for bad sizes
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		panic("segvn_unmap");
		/*NOTREACHED*/
	}

	if (seg->s_szc != 0) {
		size_t pgsz = page_get_pagesize(seg->s_szc);
		int err;
		if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
			ASSERT(seg->s_base != addr || seg->s_size != len);
			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				hat_leave_region(seg->s_as->a_hat,
				    svd->rcookie, HAT_REGION_TEXT);
				svd->rcookie = HAT_INVALID_REGION_COOKIE;
				/*
				 * could pass a flag to segvn_demote_range()
				 * below to tell it not to do any unloads but
				 * this case is rare enough to not bother for
				 * now.
				 */
			} else if (svd->tr_state == SEGVN_TR_INIT) {
				svd->tr_state = SEGVN_TR_OFF;
			} else if (svd->tr_state == SEGVN_TR_ON) {
				ASSERT(svd->amp != NULL);
				segvn_textunrepl(seg, 1);
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
			}
			VM_STAT_ADD(segvnvmstats.demoterange[0]);
			err = segvn_demote_range(seg, addr, len, SDR_END, 0);
			if (err == 0) {
				return (IE_RETRY);
			}
			return (err);
		}
	}

*/ 1912 if (svd->vp) { 1913 int error; 1914 1915 error = VOP_DELMAP(svd->vp, 1916 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1917 seg->s_as, addr, len, svd->prot, svd->maxprot, 1918 svd->type, svd->cred, NULL); 1919 1920 if (error == EAGAIN) 1921 return (error); 1922 } 1923 1924 /* 1925 * Remove any page locks set through this mapping. 1926 * If text replication is not off no page locks could have been 1927 * established via this mapping. 1928 */ 1929 if (svd->tr_state == SEGVN_TR_OFF) { 1930 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1931 } 1932 1933 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1934 ASSERT(svd->amp == NULL); 1935 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1936 ASSERT(svd->type == MAP_PRIVATE); 1937 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1938 HAT_REGION_TEXT); 1939 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1940 } else if (svd->tr_state == SEGVN_TR_ON) { 1941 ASSERT(svd->amp != NULL); 1942 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1943 segvn_textunrepl(seg, 1); 1944 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1945 } else { 1946 if (svd->tr_state != SEGVN_TR_OFF) { 1947 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1948 svd->tr_state = SEGVN_TR_OFF; 1949 } 1950 /* 1951 * Unload any hardware translations in the range to be taken 1952 * out. Use a callback to invoke free_vp_pages() effectively. 1953 */ 1954 if (svd->vp != NULL && free_pages != 0) { 1955 callback.hcb_data = seg; 1956 callback.hcb_function = segvn_hat_unload_callback; 1957 cbp = &callback; 1958 } 1959 hat_unload_callback(seg->s_as->a_hat, addr, len, 1960 HAT_UNLOAD_UNMAP, cbp); 1961 1962 if (svd->type == MAP_SHARED && svd->vp != NULL && 1963 (svd->vp->v_flag & VVMEXEC) && 1964 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1965 segvn_inval_trcache(svd->vp); 1966 } 1967 } 1968 1969 /* 1970 * Check for entire segment 1971 */ 1972 if (addr == seg->s_base && len == seg->s_size) { 1973 seg_free(seg); 1974 return (0); 1975 } 1976 1977 opages = seg_pages(seg); 1978 dpages = btop(len); 1979 npages = opages - dpages; 1980 amp = svd->amp; 1981 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1982 1983 /* 1984 * Check for beginning of segment 1985 */ 1986 if (addr == seg->s_base) { 1987 if (svd->vpage != NULL) { 1988 size_t nbytes; 1989 struct vpage *ovpage; 1990 1991 ovpage = svd->vpage; /* keep pointer to vpage */ 1992 1993 nbytes = vpgtob(npages); 1994 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1995 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1996 1997 /* free up old vpage */ 1998 kmem_free(ovpage, vpgtob(opages)); 1999 } 2000 if (amp != NULL) { 2001 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2002 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2003 /* 2004 * Shared anon map is no longer in use. Before 2005 * freeing its pages purge all entries from 2006 * pcache that belong to this amp. 2007 */ 2008 if (svd->type == MAP_SHARED) { 2009 ASSERT(amp->refcnt == 1); 2010 ASSERT(svd->softlockcnt == 0); 2011 anonmap_purge(amp); 2012 } 2013 /* 2014 * Free up now unused parts of anon_map array. 
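 * In sketch form, the cases handled just below are (added for
 * clarity, not part of the original comment):
 *
 *	amp->a_szc == seg->s_szc, s_szc != 0  -> anon_free_pages()
 *	amp->a_szc == seg->s_szc, s_szc == 0  -> anon_free()
 *	amp->a_szc >  seg->s_szc (MAP_SHARED) -> anon_shmap_free_pages()
 *
 * followed, for MAP_SHARED, by releasing the swap reserved for the
 * unmapped range (anon_unresv_zone() plus an amp->swresv adjustment).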
2015 */ 2016 if (amp->a_szc == seg->s_szc) { 2017 if (seg->s_szc != 0) { 2018 anon_free_pages(amp->ahp, 2019 svd->anon_index, len, 2020 seg->s_szc); 2021 } else { 2022 anon_free(amp->ahp, 2023 svd->anon_index, 2024 len); 2025 } 2026 } else { 2027 ASSERT(svd->type == MAP_SHARED); 2028 ASSERT(amp->a_szc > seg->s_szc); 2029 anon_shmap_free_pages(amp, 2030 svd->anon_index, len); 2031 } 2032 2033 /* 2034 * Unreserve swap space for the 2035 * unmapped chunk of this segment in 2036 * case it's MAP_SHARED 2037 */ 2038 if (svd->type == MAP_SHARED) { 2039 anon_unresv_zone(len, 2040 seg->s_as->a_proc->p_zone); 2041 amp->swresv -= len; 2042 } 2043 } 2044 ANON_LOCK_EXIT(&->a_rwlock); 2045 svd->anon_index += dpages; 2046 } 2047 if (svd->vp != NULL) 2048 svd->offset += len; 2049 2050 seg->s_base += len; 2051 seg->s_size -= len; 2052 2053 if (svd->swresv) { 2054 if (svd->flags & MAP_NORESERVE) { 2055 ASSERT(amp); 2056 oswresv = svd->swresv; 2057 2058 svd->swresv = ptob(anon_pages(amp->ahp, 2059 svd->anon_index, npages)); 2060 anon_unresv_zone(oswresv - svd->swresv, 2061 seg->s_as->a_proc->p_zone); 2062 if (SEG_IS_PARTIAL_RESV(seg)) 2063 seg->s_as->a_resvsize -= oswresv - 2064 svd->swresv; 2065 } else { 2066 size_t unlen; 2067 2068 if (svd->pageswap) { 2069 oswresv = svd->swresv; 2070 svd->swresv = 2071 segvn_count_swap_by_vpages(seg); 2072 ASSERT(oswresv >= svd->swresv); 2073 unlen = oswresv - svd->swresv; 2074 } else { 2075 svd->swresv -= len; 2076 ASSERT(svd->swresv == seg->s_size); 2077 unlen = len; 2078 } 2079 anon_unresv_zone(unlen, 2080 seg->s_as->a_proc->p_zone); 2081 } 2082 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2083 seg, len, 0); 2084 } 2085 2086 return (0); 2087 } 2088 2089 /* 2090 * Check for end of segment 2091 */ 2092 if (addr + len == seg->s_base + seg->s_size) { 2093 if (svd->vpage != NULL) { 2094 size_t nbytes; 2095 struct vpage *ovpage; 2096 2097 ovpage = svd->vpage; /* keep pointer to vpage */ 2098 2099 nbytes = vpgtob(npages); 2100 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2101 bcopy(ovpage, svd->vpage, nbytes); 2102 2103 /* free up old vpage */ 2104 kmem_free(ovpage, vpgtob(opages)); 2105 2106 } 2107 if (amp != NULL) { 2108 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2109 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2110 /* 2111 * Free up now unused parts of anon_map array. 2112 */ 2113 ulong_t an_idx = svd->anon_index + npages; 2114 2115 /* 2116 * Shared anon map is no longer in use. Before 2117 * freeing its pages purge all entries from 2118 * pcache that belong to this amp. 
2119 */ 2120 if (svd->type == MAP_SHARED) { 2121 ASSERT(amp->refcnt == 1); 2122 ASSERT(svd->softlockcnt == 0); 2123 anonmap_purge(amp); 2124 } 2125 2126 if (amp->a_szc == seg->s_szc) { 2127 if (seg->s_szc != 0) { 2128 anon_free_pages(amp->ahp, 2129 an_idx, len, 2130 seg->s_szc); 2131 } else { 2132 anon_free(amp->ahp, an_idx, 2133 len); 2134 } 2135 } else { 2136 ASSERT(svd->type == MAP_SHARED); 2137 ASSERT(amp->a_szc > seg->s_szc); 2138 anon_shmap_free_pages(amp, 2139 an_idx, len); 2140 } 2141 2142 /* 2143 * Unreserve swap space for the 2144 * unmapped chunk of this segment in 2145 * case it's MAP_SHARED 2146 */ 2147 if (svd->type == MAP_SHARED) { 2148 anon_unresv_zone(len, 2149 seg->s_as->a_proc->p_zone); 2150 amp->swresv -= len; 2151 } 2152 } 2153 ANON_LOCK_EXIT(&->a_rwlock); 2154 } 2155 2156 seg->s_size -= len; 2157 2158 if (svd->swresv) { 2159 if (svd->flags & MAP_NORESERVE) { 2160 ASSERT(amp); 2161 oswresv = svd->swresv; 2162 svd->swresv = ptob(anon_pages(amp->ahp, 2163 svd->anon_index, npages)); 2164 anon_unresv_zone(oswresv - svd->swresv, 2165 seg->s_as->a_proc->p_zone); 2166 if (SEG_IS_PARTIAL_RESV(seg)) 2167 seg->s_as->a_resvsize -= oswresv - 2168 svd->swresv; 2169 } else { 2170 size_t unlen; 2171 2172 if (svd->pageswap) { 2173 oswresv = svd->swresv; 2174 svd->swresv = 2175 segvn_count_swap_by_vpages(seg); 2176 ASSERT(oswresv >= svd->swresv); 2177 unlen = oswresv - svd->swresv; 2178 } else { 2179 svd->swresv -= len; 2180 ASSERT(svd->swresv == seg->s_size); 2181 unlen = len; 2182 } 2183 anon_unresv_zone(unlen, 2184 seg->s_as->a_proc->p_zone); 2185 } 2186 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2187 "anon proc:%p %lu %u", seg, len, 0); 2188 } 2189 2190 return (0); 2191 } 2192 2193 /* 2194 * The section to go is in the middle of the segment, 2195 * have to make it into two segments. nseg is made for 2196 * the high end while seg is cut down at the low end. 
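 *
 * Roughly (illustrative diagram, not part of the original comment):
 *
 *	before:  [ s_base ............................... s_base+s_size )
 *	unmap:               [ addr ...... addr+len )
 *	after:   [ s_base .. addr )                  [ nbase ........... )
 *	             (seg, shrunk)                     (nseg, nsize bytes)
 *
 * where nbase = addr + len and nsize = (old s_base + old s_size) - nbase,
 * exactly as computed below.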
2197 */ 2198 nbase = addr + len; /* new seg base */ 2199 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2200 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2201 nseg = seg_alloc(seg->s_as, nbase, nsize); 2202 if (nseg == NULL) { 2203 panic("segvn_unmap seg_alloc"); 2204 /*NOTREACHED*/ 2205 } 2206 nseg->s_ops = seg->s_ops; 2207 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2208 nseg->s_data = (void *)nsvd; 2209 nseg->s_szc = seg->s_szc; 2210 *nsvd = *svd; 2211 nsvd->seg = nseg; 2212 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2213 nsvd->swresv = 0; 2214 nsvd->softlockcnt = 0; 2215 nsvd->softlockcnt_sbase = 0; 2216 nsvd->softlockcnt_send = 0; 2217 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2218 2219 if (svd->vp != NULL) { 2220 VN_HOLD(nsvd->vp); 2221 if (nsvd->type == MAP_SHARED) 2222 lgrp_shm_policy_init(NULL, nsvd->vp); 2223 } 2224 crhold(svd->cred); 2225 2226 if (svd->vpage == NULL) { 2227 nsvd->vpage = NULL; 2228 } else { 2229 /* need to split vpage into two arrays */ 2230 size_t nbytes; 2231 struct vpage *ovpage; 2232 2233 ovpage = svd->vpage; /* keep pointer to vpage */ 2234 2235 npages = seg_pages(seg); /* seg has shrunk */ 2236 nbytes = vpgtob(npages); 2237 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2238 2239 bcopy(ovpage, svd->vpage, nbytes); 2240 2241 npages = seg_pages(nseg); 2242 nbytes = vpgtob(npages); 2243 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2244 2245 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2246 2247 /* free up old vpage */ 2248 kmem_free(ovpage, vpgtob(opages)); 2249 } 2250 2251 if (amp == NULL) { 2252 nsvd->amp = NULL; 2253 nsvd->anon_index = 0; 2254 } else { 2255 /* 2256 * Need to create a new anon map for the new segment. 2257 * We'll also allocate a new smaller array for the old 2258 * smaller segment to save space. 2259 */ 2260 opages = btop((uintptr_t)(addr - seg->s_base)); 2261 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2262 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2263 /* 2264 * Free up now unused parts of anon_map array. 2265 */ 2266 ulong_t an_idx = svd->anon_index + opages; 2267 2268 /* 2269 * Shared anon map is no longer in use. Before 2270 * freeing its pages purge all entries from 2271 * pcache that belong to this amp. 
2272 */ 2273 if (svd->type == MAP_SHARED) { 2274 ASSERT(amp->refcnt == 1); 2275 ASSERT(svd->softlockcnt == 0); 2276 anonmap_purge(amp); 2277 } 2278 2279 if (amp->a_szc == seg->s_szc) { 2280 if (seg->s_szc != 0) { 2281 anon_free_pages(amp->ahp, an_idx, len, 2282 seg->s_szc); 2283 } else { 2284 anon_free(amp->ahp, an_idx, 2285 len); 2286 } 2287 } else { 2288 ASSERT(svd->type == MAP_SHARED); 2289 ASSERT(amp->a_szc > seg->s_szc); 2290 anon_shmap_free_pages(amp, an_idx, len); 2291 } 2292 2293 /* 2294 * Unreserve swap space for the 2295 * unmapped chunk of this segment in 2296 * case it's MAP_SHARED 2297 */ 2298 if (svd->type == MAP_SHARED) { 2299 anon_unresv_zone(len, 2300 seg->s_as->a_proc->p_zone); 2301 amp->swresv -= len; 2302 } 2303 } 2304 nsvd->anon_index = svd->anon_index + 2305 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2306 if (svd->type == MAP_SHARED) { 2307 amp->refcnt++; 2308 nsvd->amp = amp; 2309 } else { 2310 struct anon_map *namp; 2311 struct anon_hdr *nahp; 2312 2313 ASSERT(svd->type == MAP_PRIVATE); 2314 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2315 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2316 namp->a_szc = seg->s_szc; 2317 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2318 0, btop(seg->s_size), ANON_SLEEP); 2319 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2320 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2321 anon_release(amp->ahp, btop(amp->size)); 2322 svd->anon_index = 0; 2323 nsvd->anon_index = 0; 2324 amp->ahp = nahp; 2325 amp->size = seg->s_size; 2326 nsvd->amp = namp; 2327 } 2328 ANON_LOCK_EXIT(&->a_rwlock); 2329 } 2330 if (svd->swresv) { 2331 if (svd->flags & MAP_NORESERVE) { 2332 ASSERT(amp); 2333 oswresv = svd->swresv; 2334 svd->swresv = ptob(anon_pages(amp->ahp, 2335 svd->anon_index, btop(seg->s_size))); 2336 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2337 nsvd->anon_index, btop(nseg->s_size))); 2338 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2339 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2340 seg->s_as->a_proc->p_zone); 2341 if (SEG_IS_PARTIAL_RESV(seg)) 2342 seg->s_as->a_resvsize -= oswresv - 2343 (svd->swresv + nsvd->swresv); 2344 } else { 2345 size_t unlen; 2346 2347 if (svd->pageswap) { 2348 oswresv = svd->swresv; 2349 svd->swresv = segvn_count_swap_by_vpages(seg); 2350 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2351 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2352 unlen = oswresv - (svd->swresv + nsvd->swresv); 2353 } else { 2354 if (seg->s_size + nseg->s_size + len != 2355 svd->swresv) { 2356 panic("segvn_unmap: cannot split " 2357 "swap reservation"); 2358 /*NOTREACHED*/ 2359 } 2360 svd->swresv = seg->s_size; 2361 nsvd->swresv = nseg->s_size; 2362 unlen = len; 2363 } 2364 anon_unresv_zone(unlen, 2365 seg->s_as->a_proc->p_zone); 2366 } 2367 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2368 seg, len, 0); 2369 } 2370 2371 return (0); /* I'm glad that's all over with! */ 2372 } 2373 2374 static void 2375 segvn_free(struct seg *seg) 2376 { 2377 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2378 pgcnt_t npages = seg_pages(seg); 2379 struct anon_map *amp; 2380 size_t len; 2381 2382 /* 2383 * We don't need any segment level locks for "segvn" data 2384 * since the address space is "write" locked. 2385 */ 2386 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2387 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2388 2389 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2390 2391 /* 2392 * Be sure to unlock pages. 
XXX Why do things get free'ed instead 2393 * of unmapped? XXX 2394 */ 2395 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2396 0, MC_UNLOCK, NULL, 0); 2397 2398 /* 2399 * Deallocate the vpage and anon pointers if necessary and possible. 2400 */ 2401 if (svd->vpage != NULL) { 2402 kmem_free(svd->vpage, vpgtob(npages)); 2403 svd->vpage = NULL; 2404 } 2405 if ((amp = svd->amp) != NULL) { 2406 /* 2407 * If there are no more references to this anon_map 2408 * structure, then deallocate the structure after freeing 2409 * up all the anon slot pointers that we can. 2410 */ 2411 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2412 ASSERT(amp->a_szc >= seg->s_szc); 2413 if (--amp->refcnt == 0) { 2414 if (svd->type == MAP_PRIVATE) { 2415 /* 2416 * Private - we only need to anon_free 2417 * the part that this segment refers to. 2418 */ 2419 if (seg->s_szc != 0) { 2420 anon_free_pages(amp->ahp, 2421 svd->anon_index, seg->s_size, 2422 seg->s_szc); 2423 } else { 2424 anon_free(amp->ahp, svd->anon_index, 2425 seg->s_size); 2426 } 2427 } else { 2428 2429 /* 2430 * Shared anon map is no longer in use. Before 2431 * freeing its pages purge all entries from 2432 * pcache that belong to this amp. 2433 */ 2434 ASSERT(svd->softlockcnt == 0); 2435 anonmap_purge(amp); 2436 2437 /* 2438 * Shared - anon_free the entire 2439 * anon_map's worth of stuff and 2440 * release any swap reservation. 2441 */ 2442 if (amp->a_szc != 0) { 2443 anon_shmap_free_pages(amp, 0, 2444 amp->size); 2445 } else { 2446 anon_free(amp->ahp, 0, amp->size); 2447 } 2448 if ((len = amp->swresv) != 0) { 2449 anon_unresv_zone(len, 2450 seg->s_as->a_proc->p_zone); 2451 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2452 "anon proc:%p %lu %u", seg, len, 0); 2453 } 2454 } 2455 svd->amp = NULL; 2456 ANON_LOCK_EXIT(&->a_rwlock); 2457 anonmap_free(amp); 2458 } else if (svd->type == MAP_PRIVATE) { 2459 /* 2460 * We had a private mapping which still has 2461 * a held anon_map so just free up all the 2462 * anon slot pointers that we were using. 2463 */ 2464 if (seg->s_szc != 0) { 2465 anon_free_pages(amp->ahp, svd->anon_index, 2466 seg->s_size, seg->s_szc); 2467 } else { 2468 anon_free(amp->ahp, svd->anon_index, 2469 seg->s_size); 2470 } 2471 ANON_LOCK_EXIT(&->a_rwlock); 2472 } else { 2473 ANON_LOCK_EXIT(&->a_rwlock); 2474 } 2475 } 2476 2477 /* 2478 * Release swap reservation. 2479 */ 2480 if ((len = svd->swresv) != 0) { 2481 anon_unresv_zone(svd->swresv, 2482 seg->s_as->a_proc->p_zone); 2483 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2484 seg, len, 0); 2485 if (SEG_IS_PARTIAL_RESV(seg)) 2486 seg->s_as->a_resvsize -= svd->swresv; 2487 svd->swresv = 0; 2488 } 2489 /* 2490 * Release claim on vnode, credentials, and finally free the 2491 * private data. 2492 */ 2493 if (svd->vp != NULL) { 2494 if (svd->type == MAP_SHARED) 2495 lgrp_shm_policy_fini(NULL, svd->vp); 2496 VN_RELE(svd->vp); 2497 svd->vp = NULL; 2498 } 2499 crfree(svd->cred); 2500 svd->pageprot = 0; 2501 svd->pageadvice = 0; 2502 svd->pageswap = 0; 2503 svd->cred = NULL; 2504 2505 /* 2506 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2507 * still working with this segment without holding as lock (in case 2508 * it's called by pcache async thread). 2509 */ 2510 ASSERT(svd->softlockcnt == 0); 2511 mutex_enter(&svd->segfree_syncmtx); 2512 mutex_exit(&svd->segfree_syncmtx); 2513 2514 seg->s_data = NULL; 2515 kmem_cache_free(segvn_cache, svd); 2516 } 2517 2518 /* 2519 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2520 * already been F_SOFTLOCK'ed. 
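 * When the last softlock on the segment is dropped (softlockcnt
 * reaches zero) any unmappers blocked on AS_UNMAPWAIT are woken so a
 * pending unmap can be retried; see the end of this routine.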
2521 * Caller must always match addr and len of a softunlock with a previous 2522 * softlock with exactly the same addr and len. 2523 */ 2524 static void 2525 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2526 { 2527 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2528 page_t *pp; 2529 caddr_t adr; 2530 struct vnode *vp; 2531 u_offset_t offset; 2532 ulong_t anon_index; 2533 struct anon_map *amp; 2534 struct anon *ap = NULL; 2535 2536 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2537 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2538 2539 if ((amp = svd->amp) != NULL) 2540 anon_index = svd->anon_index + seg_page(seg, addr); 2541 2542 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2543 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2544 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2545 } else { 2546 hat_unlock(seg->s_as->a_hat, addr, len); 2547 } 2548 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2549 if (amp != NULL) { 2550 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2551 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2552 != NULL) { 2553 swap_xlate(ap, &vp, &offset); 2554 } else { 2555 vp = svd->vp; 2556 offset = svd->offset + 2557 (uintptr_t)(adr - seg->s_base); 2558 } 2559 ANON_LOCK_EXIT(&->a_rwlock); 2560 } else { 2561 vp = svd->vp; 2562 offset = svd->offset + 2563 (uintptr_t)(adr - seg->s_base); 2564 } 2565 2566 /* 2567 * Use page_find() instead of page_lookup() to 2568 * find the page since we know that it is locked. 2569 */ 2570 pp = page_find(vp, offset); 2571 if (pp == NULL) { 2572 panic( 2573 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2574 (void *)adr, (void *)ap, (void *)vp, offset); 2575 /*NOTREACHED*/ 2576 } 2577 2578 if (rw == S_WRITE) { 2579 hat_setrefmod(pp); 2580 if (seg->s_as->a_vbits) 2581 hat_setstat(seg->s_as, adr, PAGESIZE, 2582 P_REF | P_MOD); 2583 } else if (rw != S_OTHER) { 2584 hat_setref(pp); 2585 if (seg->s_as->a_vbits) 2586 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2587 } 2588 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2589 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2590 page_unlock(pp); 2591 } 2592 ASSERT(svd->softlockcnt >= btop(len)); 2593 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2594 /* 2595 * All SOFTLOCKS are gone. Wakeup any waiting 2596 * unmappers so they can try again to unmap. 2597 * Check for waiters first without the mutex 2598 * held so we don't always grab the mutex on 2599 * softunlocks. 2600 */ 2601 if (AS_ISUNMAPWAIT(seg->s_as)) { 2602 mutex_enter(&seg->s_as->a_contents); 2603 if (AS_ISUNMAPWAIT(seg->s_as)) { 2604 AS_CLRUNMAPWAIT(seg->s_as); 2605 cv_broadcast(&seg->s_as->a_cv); 2606 } 2607 mutex_exit(&seg->s_as->a_contents); 2608 } 2609 } 2610 } 2611 2612 #define PAGE_HANDLED ((page_t *)-1) 2613 2614 /* 2615 * Release all the pages in the NULL terminated ppp list 2616 * which haven't already been converted to PAGE_HANDLED. 2617 */ 2618 static void 2619 segvn_pagelist_rele(page_t **ppp) 2620 { 2621 for (; *ppp != NULL; ppp++) { 2622 if (*ppp != PAGE_HANDLED) 2623 page_unlock(*ppp); 2624 } 2625 } 2626 2627 static int stealcow = 1; 2628 2629 /* 2630 * Workaround for viking chip bug. See bug id 1220902. 2631 * To fix this down in pagefault() would require importing so 2632 * much as and segvn code as to be unmaintainable. 2633 */ 2634 int enable_mbit_wa = 0; 2635 2636 /* 2637 * Handles all the dirty work of getting the right 2638 * anonymous pages and loading up the translations. 
2639 * This routine is called only from segvn_fault() 2640 * when looping over the range of addresses requested. 2641 * 2642 * The basic algorithm here is: 2643 * If this is an anon_zero case 2644 * Call anon_zero to allocate page 2645 * Load up translation 2646 * Return 2647 * endif 2648 * If this is an anon page 2649 * Use anon_getpage to get the page 2650 * else 2651 * Find page in pl[] list passed in 2652 * endif 2653 * If not a cow 2654 * Load up the translation to the page 2655 * return 2656 * endif 2657 * Call anon_private to handle cow 2658 * Load up (writable) translation to new page 2659 */ 2660 static faultcode_t 2661 segvn_faultpage( 2662 struct hat *hat, /* the hat to use for mapping */ 2663 struct seg *seg, /* seg_vn of interest */ 2664 caddr_t addr, /* address in as */ 2665 u_offset_t off, /* offset in vp */ 2666 struct vpage *vpage, /* pointer to vpage for vp, off */ 2667 page_t *pl[], /* object source page pointer */ 2668 uint_t vpprot, /* access allowed to object pages */ 2669 enum fault_type type, /* type of fault */ 2670 enum seg_rw rw, /* type of access at fault */ 2671 int brkcow) /* we may need to break cow */ 2672 { 2673 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2674 page_t *pp, **ppp; 2675 uint_t pageflags = 0; 2676 page_t *anon_pl[1 + 1]; 2677 page_t *opp = NULL; /* original page */ 2678 uint_t prot; 2679 int err; 2680 int cow; 2681 int claim; 2682 int steal = 0; 2683 ulong_t anon_index; 2684 struct anon *ap, *oldap; 2685 struct anon_map *amp; 2686 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2687 int anon_lock = 0; 2688 anon_sync_obj_t cookie; 2689 2690 if (svd->flags & MAP_TEXT) { 2691 hat_flag |= HAT_LOAD_TEXT; 2692 } 2693 2694 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2695 ASSERT(seg->s_szc == 0); 2696 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2697 2698 /* 2699 * Initialize protection value for this page. 2700 * If we have per page protection values check it now. 2701 */ 2702 if (svd->pageprot) { 2703 uint_t protchk; 2704 2705 switch (rw) { 2706 case S_READ: 2707 protchk = PROT_READ; 2708 break; 2709 case S_WRITE: 2710 protchk = PROT_WRITE; 2711 break; 2712 case S_EXEC: 2713 protchk = PROT_EXEC; 2714 break; 2715 case S_OTHER: 2716 default: 2717 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2718 break; 2719 } 2720 2721 prot = VPP_PROT(vpage); 2722 if ((prot & protchk) == 0) 2723 return (FC_PROT); /* illegal access type */ 2724 } else { 2725 prot = svd->prot; 2726 } 2727 2728 if (type == F_SOFTLOCK) { 2729 atomic_inc_ulong((ulong_t *)&svd->softlockcnt); 2730 } 2731 2732 /* 2733 * Always acquire the anon array lock to prevent 2 threads from 2734 * allocating separate anon slots for the same "addr". 2735 */ 2736 2737 if ((amp = svd->amp) != NULL) { 2738 ASSERT(RW_READ_HELD(&->a_rwlock)); 2739 anon_index = svd->anon_index + seg_page(seg, addr); 2740 anon_array_enter(amp, anon_index, &cookie); 2741 anon_lock = 1; 2742 } 2743 2744 if (svd->vp == NULL && amp != NULL) { 2745 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2746 /* 2747 * Allocate a (normally) writable anonymous page of 2748 * zeroes. If no advance reservations, reserve now. 
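 * For MAP_NORESERVE the swap reservation is made lazily, one page at
 * a time, right here; restated as a sketch of the code below:
 *
 *	if (anon_resv_zone(ptob(1), zone)) {
 *		svd->swresv += ptob(1);
 *		as->a_resvsize += ptob(1);
 *	} else {
 *		fail the fault with ENOMEM;
 *	}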
2749 */ 2750 if (svd->flags & MAP_NORESERVE) { 2751 if (anon_resv_zone(ptob(1), 2752 seg->s_as->a_proc->p_zone)) { 2753 atomic_add_long(&svd->swresv, ptob(1)); 2754 atomic_add_long(&seg->s_as->a_resvsize, 2755 ptob(1)); 2756 } else { 2757 err = ENOMEM; 2758 goto out; 2759 } 2760 } 2761 if ((pp = anon_zero(seg, addr, &ap, 2762 svd->cred)) == NULL) { 2763 err = ENOMEM; 2764 goto out; /* out of swap space */ 2765 } 2766 /* 2767 * Re-acquire the anon_map lock and 2768 * initialize the anon array entry. 2769 */ 2770 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2771 ANON_SLEEP); 2772 2773 ASSERT(pp->p_szc == 0); 2774 2775 /* 2776 * Handle pages that have been marked for migration 2777 */ 2778 if (lgrp_optimizations()) 2779 page_migrate(seg, addr, &pp, 1); 2780 2781 if (enable_mbit_wa) { 2782 if (rw == S_WRITE) 2783 hat_setmod(pp); 2784 else if (!hat_ismod(pp)) 2785 prot &= ~PROT_WRITE; 2786 } 2787 /* 2788 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2789 * with MC_LOCKAS, MCL_FUTURE) and this is a 2790 * MAP_NORESERVE segment, we may need to 2791 * permanently lock the page as it is being faulted 2792 * for the first time. The following text applies 2793 * only to MAP_NORESERVE segments: 2794 * 2795 * As per memcntl(2), if this segment was created 2796 * after MCL_FUTURE was applied (a "future" 2797 * segment), its pages must be locked. If this 2798 * segment existed at MCL_FUTURE application (a 2799 * "past" segment), the interface is unclear. 2800 * 2801 * We decide to lock only if vpage is present: 2802 * 2803 * - "future" segments will have a vpage array (see 2804 * as_map), and so will be locked as required 2805 * 2806 * - "past" segments may not have a vpage array, 2807 * depending on whether events (such as 2808 * mprotect) have occurred. Locking if vpage 2809 * exists will preserve legacy behavior. Not 2810 * locking if vpage is absent, will not break 2811 * the interface or legacy behavior. Note that 2812 * allocating vpage here if it's absent requires 2813 * upgrading the segvn reader lock, the cost of 2814 * which does not seem worthwhile. 2815 * 2816 * Usually testing and setting VPP_ISPPLOCK and 2817 * VPP_SETPPLOCK requires holding the segvn lock as 2818 * writer, but in this case all readers are 2819 * serializing on the anon array lock. 2820 */ 2821 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2822 (svd->flags & MAP_NORESERVE) && 2823 !VPP_ISPPLOCK(vpage)) { 2824 proc_t *p = seg->s_as->a_proc; 2825 ASSERT(svd->type == MAP_PRIVATE); 2826 mutex_enter(&p->p_lock); 2827 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2828 1) == 0) { 2829 claim = VPP_PROT(vpage) & PROT_WRITE; 2830 if (page_pp_lock(pp, claim, 0)) { 2831 VPP_SETPPLOCK(vpage); 2832 } else { 2833 rctl_decr_locked_mem(p, NULL, 2834 PAGESIZE, 1); 2835 } 2836 } 2837 mutex_exit(&p->p_lock); 2838 } 2839 2840 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2841 hat_memload(hat, addr, pp, prot, hat_flag); 2842 2843 if (!(hat_flag & HAT_LOAD_LOCK)) 2844 page_unlock(pp); 2845 2846 anon_array_exit(&cookie); 2847 return (0); 2848 } 2849 } 2850 2851 /* 2852 * Obtain the page structure via anon_getpage() if it is 2853 * a private copy of an object (the result of a previous 2854 * copy-on-write). 
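 * Note added for clarity: for MAP_SHARED anon mappings the write
 * permission reported by anon_getpage() is deliberately overridden
 * below (vpprot |= PROT_WRITE), because the returned protections only
 * describe the private mappings of this anon_map.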
2855 */ 2856 if (amp != NULL) { 2857 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2858 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2859 seg, addr, rw, svd->cred); 2860 if (err) 2861 goto out; 2862 2863 if (svd->type == MAP_SHARED) { 2864 /* 2865 * If this is a shared mapping to an 2866 * anon_map, then ignore the write 2867 * permissions returned by anon_getpage(). 2868 * They apply to the private mappings 2869 * of this anon_map. 2870 */ 2871 vpprot |= PROT_WRITE; 2872 } 2873 opp = anon_pl[0]; 2874 } 2875 } 2876 2877 /* 2878 * Search the pl[] list passed in if it is from the 2879 * original object (i.e., not a private copy). 2880 */ 2881 if (opp == NULL) { 2882 /* 2883 * Find original page. We must be bringing it in 2884 * from the list in pl[]. 2885 */ 2886 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2887 if (opp == PAGE_HANDLED) 2888 continue; 2889 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2890 if (opp->p_offset == off) 2891 break; 2892 } 2893 if (opp == NULL) { 2894 panic("segvn_faultpage not found"); 2895 /*NOTREACHED*/ 2896 } 2897 *ppp = PAGE_HANDLED; 2898 2899 } 2900 2901 ASSERT(PAGE_LOCKED(opp)); 2902 2903 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2904 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2905 2906 /* 2907 * The fault is treated as a copy-on-write fault if a 2908 * write occurs on a private segment and the object 2909 * page (i.e., mapping) is write protected. We assume 2910 * that fatal protection checks have already been made. 2911 */ 2912 2913 if (brkcow) { 2914 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2915 cow = !(vpprot & PROT_WRITE); 2916 } else if (svd->tr_state == SEGVN_TR_ON) { 2917 /* 2918 * If we are doing text replication COW on first touch. 2919 */ 2920 ASSERT(amp != NULL); 2921 ASSERT(svd->vp != NULL); 2922 ASSERT(rw != S_WRITE); 2923 cow = (ap == NULL); 2924 } else { 2925 cow = 0; 2926 } 2927 2928 /* 2929 * If not a copy-on-write case load the translation 2930 * and return. 2931 */ 2932 if (cow == 0) { 2933 2934 /* 2935 * Handle pages that have been marked for migration 2936 */ 2937 if (lgrp_optimizations()) 2938 page_migrate(seg, addr, &opp, 1); 2939 2940 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2941 if (rw == S_WRITE) 2942 hat_setmod(opp); 2943 else if (rw != S_OTHER && !hat_ismod(opp)) 2944 prot &= ~PROT_WRITE; 2945 } 2946 2947 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2948 (!svd->pageprot && svd->prot == (prot & vpprot))); 2949 ASSERT(amp == NULL || 2950 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2951 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2952 svd->rcookie); 2953 2954 if (!(hat_flag & HAT_LOAD_LOCK)) 2955 page_unlock(opp); 2956 2957 if (anon_lock) { 2958 anon_array_exit(&cookie); 2959 } 2960 return (0); 2961 } 2962 2963 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2964 2965 hat_setref(opp); 2966 2967 ASSERT(amp != NULL && anon_lock); 2968 2969 /* 2970 * Steal the page only if it isn't a private page 2971 * since stealing a private page is not worth the effort. 2972 */ 2973 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2974 steal = 1; 2975 2976 /* 2977 * Steal the original page if the following conditions are true: 2978 * 2979 * We are low on memory, the page is not private, page is not large, 2980 * not shared, not modified, not `locked' or if we have it `locked' 2981 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2982 * that the page is not shared) and if it doesn't have any 2983 * translations. 
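 * Condensed into code form, the test performed below is (illustrative
 * restatement only):
 *
 *	stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
 *	page_tryupgrade(opp) && !hat_ismod(opp) &&
 *	(p_lckcnt == 0 && (p_cowcnt == 0 ||
 *	    (p_cowcnt == 1 && vpage != NULL && VPP_ISPPLOCK(vpage))))
 *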
page_struct_lock isn't needed to look at p_cowcnt 2984 * and p_lckcnt because we first get exclusive lock on page. 2985 */ 2986 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2987 2988 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2989 page_tryupgrade(opp) && !hat_ismod(opp) && 2990 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2991 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2992 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2993 /* 2994 * Check if this page has other translations 2995 * after unloading our translation. 2996 */ 2997 if (hat_page_is_mapped(opp)) { 2998 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2999 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 3000 HAT_UNLOAD); 3001 } 3002 3003 /* 3004 * hat_unload() might sync back someone else's recent 3005 * modification, so check again. 3006 */ 3007 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 3008 pageflags |= STEAL_PAGE; 3009 } 3010 3011 /* 3012 * If we have a vpage pointer, see if it indicates that we have 3013 * ``locked'' the page we map -- if so, tell anon_private to 3014 * transfer the locking resource to the new page. 3015 * 3016 * See Statement at the beginning of segvn_lockop regarding 3017 * the way lockcnts/cowcnts are handled during COW. 3018 * 3019 */ 3020 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 3021 pageflags |= LOCK_PAGE; 3022 3023 /* 3024 * Allocate a private page and perform the copy. 3025 * For MAP_NORESERVE reserve swap space now, unless this 3026 * is a cow fault on an existing anon page in which case 3027 * MAP_NORESERVE will have made advance reservations. 3028 */ 3029 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3030 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3031 atomic_add_long(&svd->swresv, ptob(1)); 3032 atomic_add_long(&seg->s_as->a_resvsize, ptob(1)); 3033 } else { 3034 page_unlock(opp); 3035 err = ENOMEM; 3036 goto out; 3037 } 3038 } 3039 oldap = ap; 3040 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3041 if (pp == NULL) { 3042 err = ENOMEM; /* out of swap space */ 3043 goto out; 3044 } 3045 3046 /* 3047 * If we copied away from an anonymous page, then 3048 * we are one step closer to freeing up an anon slot. 3049 * 3050 * NOTE: The original anon slot must be released while 3051 * holding the "anon_map" lock. This is necessary to prevent 3052 * other threads from obtaining a pointer to the anon slot 3053 * which may be freed if its "refcnt" is 1. 3054 */ 3055 if (oldap != NULL) 3056 anon_decref(oldap); 3057 3058 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3059 3060 /* 3061 * Handle pages that have been marked for migration 3062 */ 3063 if (lgrp_optimizations()) 3064 page_migrate(seg, addr, &pp, 1); 3065 3066 ASSERT(pp->p_szc == 0); 3067 3068 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3069 if (enable_mbit_wa) { 3070 if (rw == S_WRITE) 3071 hat_setmod(pp); 3072 else if (!hat_ismod(pp)) 3073 prot &= ~PROT_WRITE; 3074 } 3075 3076 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3077 hat_memload(hat, addr, pp, prot, hat_flag); 3078 3079 if (!(hat_flag & HAT_LOAD_LOCK)) 3080 page_unlock(pp); 3081 3082 ASSERT(anon_lock); 3083 anon_array_exit(&cookie); 3084 return (0); 3085 out: 3086 if (anon_lock) 3087 anon_array_exit(&cookie); 3088 3089 if (type == F_SOFTLOCK) { 3090 atomic_dec_ulong((ulong_t *)&svd->softlockcnt); 3091 } 3092 return (FC_MAKE_ERR(err)); 3093 } 3094 3095 /* 3096 * relocate a bunch of smaller targ pages into one large repl page. 
all targ 3097 * pages must be complete pages smaller than replacement pages. 3098 * it's assumed that no page's szc can change since they are all PAGESIZE or 3099 * complete large pages locked SHARED. 3100 */ 3101 static void 3102 segvn_relocate_pages(page_t **targ, page_t *replacement) 3103 { 3104 page_t *pp; 3105 pgcnt_t repl_npgs, curnpgs; 3106 pgcnt_t i; 3107 uint_t repl_szc = replacement->p_szc; 3108 page_t *first_repl = replacement; 3109 page_t *repl; 3110 spgcnt_t npgs; 3111 3112 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3113 3114 ASSERT(repl_szc != 0); 3115 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3116 3117 i = 0; 3118 while (repl_npgs) { 3119 spgcnt_t nreloc; 3120 int err; 3121 ASSERT(replacement != NULL); 3122 pp = targ[i]; 3123 ASSERT(pp->p_szc < repl_szc); 3124 ASSERT(PAGE_EXCL(pp)); 3125 ASSERT(!PP_ISFREE(pp)); 3126 curnpgs = page_get_pagecnt(pp->p_szc); 3127 if (curnpgs == 1) { 3128 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3129 repl = replacement; 3130 page_sub(&replacement, repl); 3131 ASSERT(PAGE_EXCL(repl)); 3132 ASSERT(!PP_ISFREE(repl)); 3133 ASSERT(repl->p_szc == repl_szc); 3134 } else { 3135 page_t *repl_savepp; 3136 int j; 3137 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3138 repl_savepp = replacement; 3139 for (j = 0; j < curnpgs; j++) { 3140 repl = replacement; 3141 page_sub(&replacement, repl); 3142 ASSERT(PAGE_EXCL(repl)); 3143 ASSERT(!PP_ISFREE(repl)); 3144 ASSERT(repl->p_szc == repl_szc); 3145 ASSERT(page_pptonum(targ[i + j]) == 3146 page_pptonum(targ[i]) + j); 3147 } 3148 repl = repl_savepp; 3149 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3150 } 3151 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3152 if (err || nreloc != curnpgs) { 3153 panic("segvn_relocate_pages: " 3154 "page_relocate failed err=%d curnpgs=%ld " 3155 "nreloc=%ld", err, curnpgs, nreloc); 3156 } 3157 ASSERT(curnpgs <= repl_npgs); 3158 repl_npgs -= curnpgs; 3159 i += curnpgs; 3160 } 3161 ASSERT(replacement == NULL); 3162 3163 repl = first_repl; 3164 repl_npgs = npgs; 3165 for (i = 0; i < repl_npgs; i++) { 3166 ASSERT(PAGE_EXCL(repl)); 3167 ASSERT(!PP_ISFREE(repl)); 3168 targ[i] = repl; 3169 page_downgrade(targ[i]); 3170 repl++; 3171 } 3172 } 3173 3174 /* 3175 * Check if all pages in ppa array are complete smaller than szc pages and 3176 * their roots will still be aligned relative to their current size if the 3177 * entire ppa array is relocated into one szc page. If these conditions are 3178 * not met return 0. 3179 * 3180 * If all pages are properly aligned attempt to upgrade their locks 3181 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3182 * upgrdfail was set to 0 by caller. 3183 * 3184 * Return 1 if all pages are aligned and locked exclusively. 3185 * 3186 * If all pages in ppa array happen to be physically contiguous to make one 3187 * szc page and all exclusive locks are successfully obtained promote the page 3188 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
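 *
 * Summary of the outcomes described above (illustrative, not part of
 * the original comment):
 *
 *	return 0, *upgrdfail == 0 - pages are not all complete, properly
 *				    aligned pages smaller than szc
 *	return 0, *upgrdfail == 1 - alignment was fine but an exclusive
 *				    lock upgrade failed (*pszc is set)
 *	return 1		  - pages locked EXCL; or, if they were
 *				    already physically contiguous, their
 *				    size is promoted to szc, *pszc is set
 *				    and they are returned locked SHARED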
3189 */ 3190 static int 3191 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3192 { 3193 page_t *pp; 3194 pfn_t pfn; 3195 pgcnt_t totnpgs = page_get_pagecnt(szc); 3196 pfn_t first_pfn; 3197 int contig = 1; 3198 pgcnt_t i; 3199 pgcnt_t j; 3200 uint_t curszc; 3201 pgcnt_t curnpgs; 3202 int root = 0; 3203 3204 ASSERT(szc > 0); 3205 3206 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3207 3208 for (i = 0; i < totnpgs; i++) { 3209 pp = ppa[i]; 3210 ASSERT(PAGE_SHARED(pp)); 3211 ASSERT(!PP_ISFREE(pp)); 3212 pfn = page_pptonum(pp); 3213 if (i == 0) { 3214 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3215 contig = 0; 3216 } else { 3217 first_pfn = pfn; 3218 } 3219 } else if (contig && pfn != first_pfn + i) { 3220 contig = 0; 3221 } 3222 if (pp->p_szc == 0) { 3223 if (root) { 3224 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3225 return (0); 3226 } 3227 } else if (!root) { 3228 if ((curszc = pp->p_szc) >= szc) { 3229 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3230 return (0); 3231 } 3232 if (curszc == 0) { 3233 /* 3234 * p_szc changed means we don't have all pages 3235 * locked. return failure. 3236 */ 3237 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3238 return (0); 3239 } 3240 curnpgs = page_get_pagecnt(curszc); 3241 if (!IS_P2ALIGNED(pfn, curnpgs) || 3242 !IS_P2ALIGNED(i, curnpgs)) { 3243 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3244 return (0); 3245 } 3246 root = 1; 3247 } else { 3248 ASSERT(i > 0); 3249 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3250 if (pp->p_szc != curszc) { 3251 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3252 return (0); 3253 } 3254 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3255 panic("segvn_full_szcpages: " 3256 "large page not physically contiguous"); 3257 } 3258 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3259 root = 0; 3260 } 3261 } 3262 } 3263 3264 for (i = 0; i < totnpgs; i++) { 3265 ASSERT(ppa[i]->p_szc < szc); 3266 if (!page_tryupgrade(ppa[i])) { 3267 for (j = 0; j < i; j++) { 3268 page_downgrade(ppa[j]); 3269 } 3270 *pszc = ppa[i]->p_szc; 3271 *upgrdfail = 1; 3272 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3273 return (0); 3274 } 3275 } 3276 3277 /* 3278 * When a page is put a free cachelist its szc is set to 0. if file 3279 * system reclaimed pages from cachelist targ pages will be physically 3280 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3281 * pages without any relocations. 3282 * To avoid any hat issues with previous small mappings 3283 * hat_pageunload() the target pages first. 3284 */ 3285 if (contig) { 3286 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3287 for (i = 0; i < totnpgs; i++) { 3288 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3289 } 3290 for (i = 0; i < totnpgs; i++) { 3291 ppa[i]->p_szc = szc; 3292 } 3293 for (i = 0; i < totnpgs; i++) { 3294 ASSERT(PAGE_EXCL(ppa[i])); 3295 page_downgrade(ppa[i]); 3296 } 3297 if (pszc != NULL) { 3298 *pszc = szc; 3299 } 3300 } 3301 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3302 return (1); 3303 } 3304 3305 /* 3306 * Create physically contiguous pages for [vp, off] - [vp, off + 3307 * page_size(szc)) range and for private segment return them in ppa array. 3308 * Pages are created either via IO or relocations. 3309 * 3310 * Return 1 on success and 0 on failure. 3311 * 3312 * If physically contiguous pages already exist for this range return 1 without 3313 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3314 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
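 *
 * In other words the caller sees one of three results (illustrative
 * summary, not part of the original comment):
 *
 *	return 1, ppa[] filled	  - private segment; the pages were
 *				    created by IO and/or relocation
 *	return 1, ppa[0] == NULL  - a contiguous large page already
 *				    existed (or the segment is shared);
 *				    the caller fills ppa via VOP_GETPAGE()
 *	return 0		  - failure; *downsize and *ret_pszc tell
 *				    the caller whether retrying the fault
 *				    with a smaller page size makes sense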
3315 */ 3316 3317 static int 3318 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3319 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3320 int *downsize) 3321 3322 { 3323 page_t *pplist = *ppplist; 3324 size_t pgsz = page_get_pagesize(szc); 3325 pgcnt_t pages = btop(pgsz); 3326 ulong_t start_off = off; 3327 u_offset_t eoff = off + pgsz; 3328 spgcnt_t nreloc; 3329 u_offset_t io_off = off; 3330 size_t io_len; 3331 page_t *io_pplist = NULL; 3332 page_t *done_pplist = NULL; 3333 pgcnt_t pgidx = 0; 3334 page_t *pp; 3335 page_t *newpp; 3336 page_t *targpp; 3337 int io_err = 0; 3338 int i; 3339 pfn_t pfn; 3340 ulong_t ppages; 3341 page_t *targ_pplist = NULL; 3342 page_t *repl_pplist = NULL; 3343 page_t *tmp_pplist; 3344 int nios = 0; 3345 uint_t pszc; 3346 struct vattr va; 3347 3348 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3349 3350 ASSERT(szc != 0); 3351 ASSERT(pplist->p_szc == szc); 3352 3353 /* 3354 * downsize will be set to 1 only if we fail to lock pages. this will 3355 * allow subsequent faults to try to relocate the page again. If we 3356 * fail due to misalignment don't downsize and let the caller map the 3357 * whole region with small mappings to avoid more faults into the area 3358 * where we can't get large pages anyway. 3359 */ 3360 *downsize = 0; 3361 3362 while (off < eoff) { 3363 newpp = pplist; 3364 ASSERT(newpp != NULL); 3365 ASSERT(PAGE_EXCL(newpp)); 3366 ASSERT(!PP_ISFREE(newpp)); 3367 /* 3368 * we pass NULL for nrelocp to page_lookup_create() 3369 * so that it doesn't relocate. We relocate here 3370 * later only after we make sure we can lock all 3371 * pages in the range we handle and they are all 3372 * aligned. 3373 */ 3374 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3375 ASSERT(pp != NULL); 3376 ASSERT(!PP_ISFREE(pp)); 3377 ASSERT(pp->p_vnode == vp); 3378 ASSERT(pp->p_offset == off); 3379 if (pp == newpp) { 3380 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3381 page_sub(&pplist, pp); 3382 ASSERT(PAGE_EXCL(pp)); 3383 ASSERT(page_iolock_assert(pp)); 3384 page_list_concat(&io_pplist, &pp); 3385 off += PAGESIZE; 3386 continue; 3387 } 3388 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3389 pfn = page_pptonum(pp); 3390 pszc = pp->p_szc; 3391 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3392 IS_P2ALIGNED(pfn, pages)) { 3393 ASSERT(repl_pplist == NULL); 3394 ASSERT(done_pplist == NULL); 3395 ASSERT(pplist == *ppplist); 3396 page_unlock(pp); 3397 page_free_replacement_page(pplist); 3398 page_create_putback(pages); 3399 *ppplist = NULL; 3400 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3401 return (1); 3402 } 3403 if (pszc >= szc) { 3404 page_unlock(pp); 3405 segvn_faultvnmpss_align_err1++; 3406 goto out; 3407 } 3408 ppages = page_get_pagecnt(pszc); 3409 if (!IS_P2ALIGNED(pfn, ppages)) { 3410 ASSERT(pszc > 0); 3411 /* 3412 * sizing down to pszc won't help. 3413 */ 3414 page_unlock(pp); 3415 segvn_faultvnmpss_align_err2++; 3416 goto out; 3417 } 3418 pfn = page_pptonum(newpp); 3419 if (!IS_P2ALIGNED(pfn, ppages)) { 3420 ASSERT(pszc > 0); 3421 /* 3422 * sizing down to pszc won't help. 
3423 */ 3424 page_unlock(pp); 3425 segvn_faultvnmpss_align_err3++; 3426 goto out; 3427 } 3428 if (!PAGE_EXCL(pp)) { 3429 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3430 page_unlock(pp); 3431 *downsize = 1; 3432 *ret_pszc = pp->p_szc; 3433 goto out; 3434 } 3435 targpp = pp; 3436 if (io_pplist != NULL) { 3437 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3438 io_len = off - io_off; 3439 /* 3440 * Some file systems like NFS don't check EOF 3441 * conditions in VOP_PAGEIO(). Check it here 3442 * now that pages are locked SE_EXCL. Any file 3443 * truncation will wait until the pages are 3444 * unlocked so no need to worry that file will 3445 * be truncated after we check its size here. 3446 * XXX fix NFS to remove this check. 3447 */ 3448 va.va_mask = AT_SIZE; 3449 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3450 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3451 page_unlock(targpp); 3452 goto out; 3453 } 3454 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3455 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3456 *downsize = 1; 3457 *ret_pszc = 0; 3458 page_unlock(targpp); 3459 goto out; 3460 } 3461 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3462 B_READ, svd->cred, NULL); 3463 if (io_err) { 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3465 page_unlock(targpp); 3466 if (io_err == EDEADLK) { 3467 segvn_vmpss_pageio_deadlk_err++; 3468 } 3469 goto out; 3470 } 3471 nios++; 3472 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3473 while (io_pplist != NULL) { 3474 pp = io_pplist; 3475 page_sub(&io_pplist, pp); 3476 ASSERT(page_iolock_assert(pp)); 3477 page_io_unlock(pp); 3478 pgidx = (pp->p_offset - start_off) >> 3479 PAGESHIFT; 3480 ASSERT(pgidx < pages); 3481 ppa[pgidx] = pp; 3482 page_list_concat(&done_pplist, &pp); 3483 } 3484 } 3485 pp = targpp; 3486 ASSERT(PAGE_EXCL(pp)); 3487 ASSERT(pp->p_szc <= pszc); 3488 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3489 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3490 page_unlock(pp); 3491 *downsize = 1; 3492 *ret_pszc = pp->p_szc; 3493 goto out; 3494 } 3495 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3496 /* 3497 * page szc chould have changed before the entire group was 3498 * locked. reread page szc. 
3499 */ 3500 pszc = pp->p_szc; 3501 ppages = page_get_pagecnt(pszc); 3502 3503 /* link just the roots */ 3504 page_list_concat(&targ_pplist, &pp); 3505 page_sub(&pplist, newpp); 3506 page_list_concat(&repl_pplist, &newpp); 3507 off += PAGESIZE; 3508 while (--ppages != 0) { 3509 newpp = pplist; 3510 page_sub(&pplist, newpp); 3511 off += PAGESIZE; 3512 } 3513 io_off = off; 3514 } 3515 if (io_pplist != NULL) { 3516 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3517 io_len = eoff - io_off; 3518 va.va_mask = AT_SIZE; 3519 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3520 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3521 goto out; 3522 } 3523 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3524 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3525 *downsize = 1; 3526 *ret_pszc = 0; 3527 goto out; 3528 } 3529 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3530 B_READ, svd->cred, NULL); 3531 if (io_err) { 3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3533 if (io_err == EDEADLK) { 3534 segvn_vmpss_pageio_deadlk_err++; 3535 } 3536 goto out; 3537 } 3538 nios++; 3539 while (io_pplist != NULL) { 3540 pp = io_pplist; 3541 page_sub(&io_pplist, pp); 3542 ASSERT(page_iolock_assert(pp)); 3543 page_io_unlock(pp); 3544 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3545 ASSERT(pgidx < pages); 3546 ppa[pgidx] = pp; 3547 } 3548 } 3549 /* 3550 * we're now bound to succeed or panic. 3551 * remove pages from done_pplist. it's not needed anymore. 3552 */ 3553 while (done_pplist != NULL) { 3554 pp = done_pplist; 3555 page_sub(&done_pplist, pp); 3556 } 3557 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3558 ASSERT(pplist == NULL); 3559 *ppplist = NULL; 3560 while (targ_pplist != NULL) { 3561 int ret; 3562 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3563 ASSERT(repl_pplist); 3564 pp = targ_pplist; 3565 page_sub(&targ_pplist, pp); 3566 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3567 newpp = repl_pplist; 3568 page_sub(&repl_pplist, newpp); 3569 #ifdef DEBUG 3570 pfn = page_pptonum(pp); 3571 pszc = pp->p_szc; 3572 ppages = page_get_pagecnt(pszc); 3573 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3574 pfn = page_pptonum(newpp); 3575 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3576 ASSERT(P2PHASE(pfn, pages) == pgidx); 3577 #endif 3578 nreloc = 0; 3579 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3580 if (ret != 0 || nreloc == 0) { 3581 panic("segvn_fill_vp_pages: " 3582 "page_relocate failed"); 3583 } 3584 pp = newpp; 3585 while (nreloc-- != 0) { 3586 ASSERT(PAGE_EXCL(pp)); 3587 ASSERT(pp->p_vnode == vp); 3588 ASSERT(pgidx == 3589 ((pp->p_offset - start_off) >> PAGESHIFT)); 3590 ppa[pgidx++] = pp; 3591 pp++; 3592 } 3593 } 3594 3595 if (svd->type == MAP_PRIVATE) { 3596 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3597 for (i = 0; i < pages; i++) { 3598 ASSERT(ppa[i] != NULL); 3599 ASSERT(PAGE_EXCL(ppa[i])); 3600 ASSERT(ppa[i]->p_vnode == vp); 3601 ASSERT(ppa[i]->p_offset == 3602 start_off + (i << PAGESHIFT)); 3603 page_downgrade(ppa[i]); 3604 } 3605 ppa[pages] = NULL; 3606 } else { 3607 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3608 /* 3609 * the caller will still call VOP_GETPAGE() for shared segments 3610 * to check FS write permissions. For private segments we map 3611 * file read only anyway. so no VOP_GETPAGE is needed. 
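 * That is why, just below, shared-segment pages are simply unlocked
 * and ppa[0] is reset to NULL ("not filled" as far as the caller is
 * concerned), whereas the private-segment pages above were downgraded
 * to SHARED locks and left in ppa[]. (Note added for clarity.)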
3612 */ 3613 for (i = 0; i < pages; i++) { 3614 ASSERT(ppa[i] != NULL); 3615 ASSERT(PAGE_EXCL(ppa[i])); 3616 ASSERT(ppa[i]->p_vnode == vp); 3617 ASSERT(ppa[i]->p_offset == 3618 start_off + (i << PAGESHIFT)); 3619 page_unlock(ppa[i]); 3620 } 3621 ppa[0] = NULL; 3622 } 3623 3624 return (1); 3625 out: 3626 /* 3627 * Do the cleanup. Unlock target pages we didn't relocate. They are 3628 * linked on targ_pplist by root pages. reassemble unused replacement 3629 * and io pages back to pplist. 3630 */ 3631 if (io_pplist != NULL) { 3632 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3633 pp = io_pplist; 3634 do { 3635 ASSERT(pp->p_vnode == vp); 3636 ASSERT(pp->p_offset == io_off); 3637 ASSERT(page_iolock_assert(pp)); 3638 page_io_unlock(pp); 3639 page_hashout(pp, NULL); 3640 io_off += PAGESIZE; 3641 } while ((pp = pp->p_next) != io_pplist); 3642 page_list_concat(&io_pplist, &pplist); 3643 pplist = io_pplist; 3644 } 3645 tmp_pplist = NULL; 3646 while (targ_pplist != NULL) { 3647 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3648 pp = targ_pplist; 3649 ASSERT(PAGE_EXCL(pp)); 3650 page_sub(&targ_pplist, pp); 3651 3652 pszc = pp->p_szc; 3653 ppages = page_get_pagecnt(pszc); 3654 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3655 3656 if (pszc != 0) { 3657 group_page_unlock(pp); 3658 } 3659 page_unlock(pp); 3660 3661 pp = repl_pplist; 3662 ASSERT(pp != NULL); 3663 ASSERT(PAGE_EXCL(pp)); 3664 ASSERT(pp->p_szc == szc); 3665 page_sub(&repl_pplist, pp); 3666 3667 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3668 3669 /* relink replacement page */ 3670 page_list_concat(&tmp_pplist, &pp); 3671 while (--ppages != 0) { 3672 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3673 pp++; 3674 ASSERT(PAGE_EXCL(pp)); 3675 ASSERT(pp->p_szc == szc); 3676 page_list_concat(&tmp_pplist, &pp); 3677 } 3678 } 3679 if (tmp_pplist != NULL) { 3680 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3681 page_list_concat(&tmp_pplist, &pplist); 3682 pplist = tmp_pplist; 3683 } 3684 /* 3685 * at this point all pages are either on done_pplist or 3686 * pplist. They can't be all on done_pplist otherwise 3687 * we'd've been done. 3688 */ 3689 ASSERT(pplist != NULL); 3690 if (nios != 0) { 3691 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3692 pp = pplist; 3693 do { 3694 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3695 ASSERT(pp->p_szc == szc); 3696 ASSERT(PAGE_EXCL(pp)); 3697 ASSERT(pp->p_vnode != vp); 3698 pp->p_szc = 0; 3699 } while ((pp = pp->p_next) != pplist); 3700 3701 pp = done_pplist; 3702 do { 3703 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3704 ASSERT(pp->p_szc == szc); 3705 ASSERT(PAGE_EXCL(pp)); 3706 ASSERT(pp->p_vnode == vp); 3707 pp->p_szc = 0; 3708 } while ((pp = pp->p_next) != done_pplist); 3709 3710 while (pplist != NULL) { 3711 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3712 pp = pplist; 3713 page_sub(&pplist, pp); 3714 page_free(pp, 0); 3715 } 3716 3717 while (done_pplist != NULL) { 3718 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3719 pp = done_pplist; 3720 page_sub(&done_pplist, pp); 3721 page_unlock(pp); 3722 } 3723 *ppplist = NULL; 3724 return (0); 3725 } 3726 ASSERT(pplist == *ppplist); 3727 if (io_err) { 3728 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3729 /* 3730 * don't downsize on io error. 3731 * see if vop_getpage succeeds. 3732 * pplist may still be used in this case 3733 * for relocations. 
3734 */ 3735 return (0); 3736 } 3737 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3738 page_free_replacement_page(pplist); 3739 page_create_putback(pages); 3740 *ppplist = NULL; 3741 return (0); 3742 } 3743 3744 int segvn_anypgsz = 0; 3745 3746 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3747 if ((type) == F_SOFTLOCK) { \ 3748 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3749 -(pages)); \ 3750 } 3751 3752 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3753 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3754 if ((rw) == S_WRITE) { \ 3755 for (i = 0; i < (pages); i++) { \ 3756 ASSERT((ppa)[i]->p_vnode == \ 3757 (ppa)[0]->p_vnode); \ 3758 hat_setmod((ppa)[i]); \ 3759 } \ 3760 } else if ((rw) != S_OTHER && \ 3761 ((prot) & (vpprot) & PROT_WRITE)) { \ 3762 for (i = 0; i < (pages); i++) { \ 3763 ASSERT((ppa)[i]->p_vnode == \ 3764 (ppa)[0]->p_vnode); \ 3765 if (!hat_ismod((ppa)[i])) { \ 3766 prot &= ~PROT_WRITE; \ 3767 break; \ 3768 } \ 3769 } \ 3770 } \ 3771 } 3772 3773 #ifdef VM_STATS 3774 3775 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3776 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3777 3778 #else /* VM_STATS */ 3779 3780 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3781 3782 #endif 3783 3784 static faultcode_t 3785 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3786 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3787 caddr_t eaddr, int brkcow) 3788 { 3789 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3790 struct anon_map *amp = svd->amp; 3791 uchar_t segtype = svd->type; 3792 uint_t szc = seg->s_szc; 3793 size_t pgsz = page_get_pagesize(szc); 3794 size_t maxpgsz = pgsz; 3795 pgcnt_t pages = btop(pgsz); 3796 pgcnt_t maxpages = pages; 3797 size_t ppasize = (pages + 1) * sizeof (page_t *); 3798 caddr_t a = lpgaddr; 3799 caddr_t maxlpgeaddr = lpgeaddr; 3800 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3801 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3802 struct vpage *vpage = (svd->vpage != NULL) ? 3803 &svd->vpage[seg_page(seg, a)] : NULL; 3804 vnode_t *vp = svd->vp; 3805 page_t **ppa; 3806 uint_t pszc; 3807 size_t ppgsz; 3808 pgcnt_t ppages; 3809 faultcode_t err = 0; 3810 int ierr; 3811 int vop_size_err = 0; 3812 uint_t protchk, prot, vpprot; 3813 ulong_t i; 3814 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3815 anon_sync_obj_t an_cookie; 3816 enum seg_rw arw; 3817 int alloc_failed = 0; 3818 int adjszc_chk; 3819 struct vattr va; 3820 int xhat = 0; 3821 page_t *pplist; 3822 pfn_t pfn; 3823 int physcontig; 3824 int upgrdfail; 3825 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3826 int tron = (svd->tr_state == SEGVN_TR_ON); 3827 3828 ASSERT(szc != 0); 3829 ASSERT(vp != NULL); 3830 ASSERT(brkcow == 0 || amp != NULL); 3831 ASSERT(tron == 0 || amp != NULL); 3832 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3833 ASSERT(!(svd->flags & MAP_NORESERVE)); 3834 ASSERT(type != F_SOFTUNLOCK); 3835 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3836 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3837 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3838 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3839 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3840 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3841 3842 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3843 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3844 3845 if (svd->flags & MAP_TEXT) { 3846 hat_flag |= HAT_LOAD_TEXT; 3847 } 3848 3849 if (svd->pageprot) { 3850 switch (rw) { 3851 case S_READ: 3852 protchk = PROT_READ; 3853 break; 3854 case S_WRITE: 3855 protchk = PROT_WRITE; 3856 break; 3857 case S_EXEC: 3858 protchk = PROT_EXEC; 3859 break; 3860 case S_OTHER: 3861 default: 3862 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3863 break; 3864 } 3865 } else { 3866 prot = svd->prot; 3867 /* caller has already done segment level protection check. */ 3868 } 3869 3870 if (seg->s_as->a_hat != hat) { 3871 xhat = 1; 3872 } 3873 3874 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3875 SEGVN_VMSTAT_FLTVNPAGES(2); 3876 arw = S_READ; 3877 } else { 3878 arw = rw; 3879 } 3880 3881 ppa = kmem_alloc(ppasize, KM_SLEEP); 3882 3883 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3884 3885 for (;;) { 3886 adjszc_chk = 0; 3887 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3888 if (adjszc_chk) { 3889 while (szc < seg->s_szc) { 3890 uintptr_t e; 3891 uint_t tszc; 3892 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3893 seg->s_szc; 3894 ppgsz = page_get_pagesize(tszc); 3895 if (!IS_P2ALIGNED(a, ppgsz) || 3896 ((alloc_failed >> tszc) & 0x1)) { 3897 break; 3898 } 3899 SEGVN_VMSTAT_FLTVNPAGES(4); 3900 szc = tszc; 3901 pgsz = ppgsz; 3902 pages = btop(pgsz); 3903 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3904 lpgeaddr = (caddr_t)e; 3905 } 3906 } 3907 3908 again: 3909 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3910 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3911 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 3912 anon_array_enter(amp, aindx, &an_cookie); 3913 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3914 SEGVN_VMSTAT_FLTVNPAGES(5); 3915 ASSERT(anon_pages(amp->ahp, aindx, 3916 maxpages) == maxpages); 3917 anon_array_exit(&an_cookie); 3918 ANON_LOCK_EXIT(&amp->a_rwlock); 3919 err = segvn_fault_anonpages(hat, seg, 3920 a, a + maxpgsz, type, rw, 3921 MAX(a, addr), 3922 MIN(a + maxpgsz, eaddr), brkcow); 3923 if (err != 0) { 3924 SEGVN_VMSTAT_FLTVNPAGES(6); 3925 goto out; 3926 } 3927 if (szc < seg->s_szc) { 3928 szc = seg->s_szc; 3929 pgsz = maxpgsz; 3930 pages = maxpages; 3931 lpgeaddr = maxlpgeaddr; 3932 } 3933 goto next; 3934 } else { 3935 ASSERT(anon_pages(amp->ahp, aindx, 3936 maxpages) == 0); 3937 SEGVN_VMSTAT_FLTVNPAGES(7); 3938 anon_array_exit(&an_cookie); 3939 ANON_LOCK_EXIT(&amp->a_rwlock); 3940 } 3941 } 3942 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3943 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3944 3945 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3946 ASSERT(vpage != NULL); 3947 prot = VPP_PROT(vpage); 3948 ASSERT(sameprot(seg, a, maxpgsz)); 3949 if ((prot & protchk) == 0) { 3950 SEGVN_VMSTAT_FLTVNPAGES(8); 3951 err = FC_PROT; 3952 goto out; 3953 } 3954 } 3955 if (type == F_SOFTLOCK) { 3956 atomic_add_long((ulong_t *)&svd->softlockcnt, 3957 pages); 3958 } 3959 3960 pplist = NULL; 3961 physcontig = 0; 3962 ppa[0] = NULL; 3963 if (!brkcow && !tron && szc && 3964 !page_exists_physcontig(vp, off, szc, 3965 segtype == MAP_PRIVATE ?
ppa : NULL)) { 3966 SEGVN_VMSTAT_FLTVNPAGES(9); 3967 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3968 szc, 0, 0) && type != F_SOFTLOCK) { 3969 SEGVN_VMSTAT_FLTVNPAGES(10); 3970 pszc = 0; 3971 ierr = -1; 3972 alloc_failed |= (1 << szc); 3973 break; 3974 } 3975 if (pplist != NULL && 3976 vp->v_mpssdata == SEGVN_PAGEIO) { 3977 int downsize; 3978 SEGVN_VMSTAT_FLTVNPAGES(11); 3979 physcontig = segvn_fill_vp_pages(svd, 3980 vp, off, szc, ppa, &pplist, 3981 &pszc, &downsize); 3982 ASSERT(!physcontig || pplist == NULL); 3983 if (!physcontig && downsize && 3984 type != F_SOFTLOCK) { 3985 ASSERT(pplist == NULL); 3986 SEGVN_VMSTAT_FLTVNPAGES(12); 3987 ierr = -1; 3988 break; 3989 } 3990 ASSERT(!physcontig || 3991 segtype == MAP_PRIVATE || 3992 ppa[0] == NULL); 3993 if (physcontig && ppa[0] == NULL) { 3994 physcontig = 0; 3995 } 3996 } 3997 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 3998 SEGVN_VMSTAT_FLTVNPAGES(13); 3999 ASSERT(segtype == MAP_PRIVATE); 4000 physcontig = 1; 4001 } 4002 4003 if (!physcontig) { 4004 SEGVN_VMSTAT_FLTVNPAGES(14); 4005 ppa[0] = NULL; 4006 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 4007 &vpprot, ppa, pgsz, seg, a, arw, 4008 svd->cred, NULL); 4009 #ifdef DEBUG 4010 if (ierr == 0) { 4011 for (i = 0; i < pages; i++) { 4012 ASSERT(PAGE_LOCKED(ppa[i])); 4013 ASSERT(!PP_ISFREE(ppa[i])); 4014 ASSERT(ppa[i]->p_vnode == vp); 4015 ASSERT(ppa[i]->p_offset == 4016 off + (i << PAGESHIFT)); 4017 } 4018 } 4019 #endif /* DEBUG */ 4020 if (segtype == MAP_PRIVATE) { 4021 SEGVN_VMSTAT_FLTVNPAGES(15); 4022 vpprot &= ~PROT_WRITE; 4023 } 4024 } else { 4025 ASSERT(segtype == MAP_PRIVATE); 4026 SEGVN_VMSTAT_FLTVNPAGES(16); 4027 vpprot = PROT_ALL & ~PROT_WRITE; 4028 ierr = 0; 4029 } 4030 4031 if (ierr != 0) { 4032 SEGVN_VMSTAT_FLTVNPAGES(17); 4033 if (pplist != NULL) { 4034 SEGVN_VMSTAT_FLTVNPAGES(18); 4035 page_free_replacement_page(pplist); 4036 page_create_putback(pages); 4037 } 4038 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4039 if (a + pgsz <= eaddr) { 4040 SEGVN_VMSTAT_FLTVNPAGES(19); 4041 err = FC_MAKE_ERR(ierr); 4042 goto out; 4043 } 4044 va.va_mask = AT_SIZE; 4045 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4046 SEGVN_VMSTAT_FLTVNPAGES(20); 4047 err = FC_MAKE_ERR(EIO); 4048 goto out; 4049 } 4050 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4051 SEGVN_VMSTAT_FLTVNPAGES(21); 4052 err = FC_MAKE_ERR(ierr); 4053 goto out; 4054 } 4055 if (btopr(va.va_size) < 4056 btopr(off + (eaddr - a))) { 4057 SEGVN_VMSTAT_FLTVNPAGES(22); 4058 err = FC_MAKE_ERR(ierr); 4059 goto out; 4060 } 4061 if (brkcow || tron || type == F_SOFTLOCK) { 4062 /* can't reduce map area */ 4063 SEGVN_VMSTAT_FLTVNPAGES(23); 4064 vop_size_err = 1; 4065 goto out; 4066 } 4067 SEGVN_VMSTAT_FLTVNPAGES(24); 4068 ASSERT(szc != 0); 4069 pszc = 0; 4070 ierr = -1; 4071 break; 4072 } 4073 4074 if (amp != NULL) { 4075 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4076 anon_array_enter(amp, aindx, &an_cookie); 4077 } 4078 if (amp != NULL && 4079 anon_get_ptr(amp->ahp, aindx) != NULL) { 4080 ulong_t taindx = P2ALIGN(aindx, maxpages); 4081 4082 SEGVN_VMSTAT_FLTVNPAGES(25); 4083 ASSERT(anon_pages(amp->ahp, taindx, 4084 maxpages) == maxpages); 4085 for (i = 0; i < pages; i++) { 4086 page_unlock(ppa[i]); 4087 } 4088 anon_array_exit(&an_cookie); 4089 ANON_LOCK_EXIT(&->a_rwlock); 4090 if (pplist != NULL) { 4091 page_free_replacement_page(pplist); 4092 page_create_putback(pages); 4093 } 4094 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4095 if (szc < seg->s_szc) { 4096 SEGVN_VMSTAT_FLTVNPAGES(26); 4097 /* 4098 * For private segments SOFTLOCK 
4099 * either always breaks cow (any rw 4100 * type except S_READ_NOCOW) or 4101 * address space is locked as writer 4102 * (S_READ_NOCOW case) and anon slots 4103 * can't show up on second check. 4104 * Therefore if we are here for the 4105 * SOFTLOCK case it must be a cow 4106 * break, but a cow break never reduces 4107 * szc. Text replication (tron) in 4108 * this case works as a cow break. 4109 * Thus the assert below. 4110 */ 4111 ASSERT(!brkcow && !tron && 4112 type != F_SOFTLOCK); 4113 pszc = seg->s_szc; 4114 ierr = -2; 4115 break; 4116 } 4117 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4118 goto again; 4119 } 4120 #ifdef DEBUG 4121 if (amp != NULL) { 4122 ulong_t taindx = P2ALIGN(aindx, maxpages); 4123 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4124 } 4125 #endif /* DEBUG */ 4126 4127 if (brkcow || tron) { 4128 ASSERT(amp != NULL); 4129 ASSERT(pplist == NULL); 4130 ASSERT(szc == seg->s_szc); 4131 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4132 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4133 SEGVN_VMSTAT_FLTVNPAGES(27); 4134 ierr = anon_map_privatepages(amp, aindx, szc, 4135 seg, a, prot, ppa, vpage, segvn_anypgsz, 4136 tron ? PG_LOCAL : 0, svd->cred); 4137 if (ierr != 0) { 4138 SEGVN_VMSTAT_FLTVNPAGES(28); 4139 anon_array_exit(&an_cookie); 4140 ANON_LOCK_EXIT(&amp->a_rwlock); 4141 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4142 err = FC_MAKE_ERR(ierr); 4143 goto out; 4144 } 4145 4146 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4147 /* 4148 * p_szc can't be changed for locked 4149 * swapfs pages. 4150 */ 4151 ASSERT(svd->rcookie == 4152 HAT_INVALID_REGION_COOKIE); 4153 hat_memload_array(hat, a, pgsz, ppa, prot, 4154 hat_flag); 4155 4156 if (!(hat_flag & HAT_LOAD_LOCK)) { 4157 SEGVN_VMSTAT_FLTVNPAGES(29); 4158 for (i = 0; i < pages; i++) { 4159 page_unlock(ppa[i]); 4160 } 4161 } 4162 anon_array_exit(&an_cookie); 4163 ANON_LOCK_EXIT(&amp->a_rwlock); 4164 goto next; 4165 } 4166 4167 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4168 (!svd->pageprot && svd->prot == (prot & vpprot))); 4169 4170 pfn = page_pptonum(ppa[0]); 4171 /* 4172 * hat_page_demote() needs an SE_EXCL lock on one of the 4173 * constituent page_t's and it decreases the root's p_szc 4174 * last. This means that if the root's p_szc is equal to szc 4175 * and all its constituent pages are locked, any 4176 * hat_page_demote() that could have changed p_szc to 4177 * szc has already completed and no new hat_page_demote() 4178 * can start for this large page. 4179 */ 4180 4181 /* 4182 * We need to make sure the same mapping size is used for 4183 * the same address range if there's a possibility the 4184 * address is already mapped, because the hat layer panics 4185 * when a translation is loaded for a range already 4186 * mapped with a different page size. We achieve this 4187 * by always using the largest page size possible subject 4188 * to the constraints of page size, segment page size 4189 * and page alignment. Since mappings are invalidated 4190 * whenever those constraints change and make it 4191 * impossible to reuse the previously used mapping size, no 4192 * mapping size conflicts should happen.
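 * The chkszc logic below implements this: the existing pages'
 * p_szc and pfn alignment are compared against the size we are
 * about to map with, and we either size up (ierr == -2) or size
 * down (ierr == -1) when they cannot be made to agree.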
4193 */ 4194 4195 chkszc: 4196 if ((pszc = ppa[0]->p_szc) == szc && 4197 IS_P2ALIGNED(pfn, pages)) { 4198 4199 SEGVN_VMSTAT_FLTVNPAGES(30); 4200 #ifdef DEBUG 4201 for (i = 0; i < pages; i++) { 4202 ASSERT(PAGE_LOCKED(ppa[i])); 4203 ASSERT(!PP_ISFREE(ppa[i])); 4204 ASSERT(page_pptonum(ppa[i]) == 4205 pfn + i); 4206 ASSERT(ppa[i]->p_szc == szc); 4207 ASSERT(ppa[i]->p_vnode == vp); 4208 ASSERT(ppa[i]->p_offset == 4209 off + (i << PAGESHIFT)); 4210 } 4211 #endif /* DEBUG */ 4212 /* 4213 * All pages are of szc we need and they are 4214 * all locked so they can't change szc. load 4215 * translations. 4216 * 4217 * if page got promoted since last check 4218 * we don't need pplist. 4219 */ 4220 if (pplist != NULL) { 4221 page_free_replacement_page(pplist); 4222 page_create_putback(pages); 4223 } 4224 if (PP_ISMIGRATE(ppa[0])) { 4225 page_migrate(seg, a, ppa, pages); 4226 } 4227 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4228 prot, vpprot); 4229 if (!xhat) { 4230 hat_memload_array_region(hat, a, pgsz, 4231 ppa, prot & vpprot, hat_flag, 4232 svd->rcookie); 4233 } else { 4234 /* 4235 * avoid large xhat mappings to FS 4236 * pages so that hat_page_demote() 4237 * doesn't need to check for xhat 4238 * large mappings. 4239 * Don't use regions with xhats. 4240 */ 4241 for (i = 0; i < pages; i++) { 4242 hat_memload(hat, 4243 a + (i << PAGESHIFT), 4244 ppa[i], prot & vpprot, 4245 hat_flag); 4246 } 4247 } 4248 4249 if (!(hat_flag & HAT_LOAD_LOCK)) { 4250 for (i = 0; i < pages; i++) { 4251 page_unlock(ppa[i]); 4252 } 4253 } 4254 if (amp != NULL) { 4255 anon_array_exit(&an_cookie); 4256 ANON_LOCK_EXIT(&->a_rwlock); 4257 } 4258 goto next; 4259 } 4260 4261 /* 4262 * See if upsize is possible. 4263 */ 4264 if (pszc > szc && szc < seg->s_szc && 4265 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4266 pgcnt_t aphase; 4267 uint_t pszc1 = MIN(pszc, seg->s_szc); 4268 ppgsz = page_get_pagesize(pszc1); 4269 ppages = btop(ppgsz); 4270 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4271 4272 ASSERT(type != F_SOFTLOCK); 4273 4274 SEGVN_VMSTAT_FLTVNPAGES(31); 4275 if (aphase != P2PHASE(pfn, ppages)) { 4276 segvn_faultvnmpss_align_err4++; 4277 } else { 4278 SEGVN_VMSTAT_FLTVNPAGES(32); 4279 if (pplist != NULL) { 4280 page_t *pl = pplist; 4281 page_free_replacement_page(pl); 4282 page_create_putback(pages); 4283 } 4284 for (i = 0; i < pages; i++) { 4285 page_unlock(ppa[i]); 4286 } 4287 if (amp != NULL) { 4288 anon_array_exit(&an_cookie); 4289 ANON_LOCK_EXIT(&->a_rwlock); 4290 } 4291 pszc = pszc1; 4292 ierr = -2; 4293 break; 4294 } 4295 } 4296 4297 /* 4298 * check if we should use smallest mapping size. 4299 */ 4300 upgrdfail = 0; 4301 if (szc == 0 || xhat || 4302 (pszc >= szc && 4303 !IS_P2ALIGNED(pfn, pages)) || 4304 (pszc < szc && 4305 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4306 &pszc))) { 4307 4308 if (upgrdfail && type != F_SOFTLOCK) { 4309 /* 4310 * segvn_full_szcpages failed to lock 4311 * all pages EXCL. Size down. 
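 * Returning ierr == -1 here makes the outer loop retry this
 * address range with a smaller page size.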
4312 */ 4313 ASSERT(pszc < szc); 4314 4315 SEGVN_VMSTAT_FLTVNPAGES(33); 4316 4317 if (pplist != NULL) { 4318 page_t *pl = pplist; 4319 page_free_replacement_page(pl); 4320 page_create_putback(pages); 4321 } 4322 4323 for (i = 0; i < pages; i++) { 4324 page_unlock(ppa[i]); 4325 } 4326 if (amp != NULL) { 4327 anon_array_exit(&an_cookie); 4328 ANON_LOCK_EXIT(&->a_rwlock); 4329 } 4330 ierr = -1; 4331 break; 4332 } 4333 if (szc != 0 && !xhat && !upgrdfail) { 4334 segvn_faultvnmpss_align_err5++; 4335 } 4336 SEGVN_VMSTAT_FLTVNPAGES(34); 4337 if (pplist != NULL) { 4338 page_free_replacement_page(pplist); 4339 page_create_putback(pages); 4340 } 4341 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4342 prot, vpprot); 4343 if (upgrdfail && segvn_anypgsz_vnode) { 4344 /* SOFTLOCK case */ 4345 hat_memload_array_region(hat, a, pgsz, 4346 ppa, prot & vpprot, hat_flag, 4347 svd->rcookie); 4348 } else { 4349 for (i = 0; i < pages; i++) { 4350 hat_memload_region(hat, 4351 a + (i << PAGESHIFT), 4352 ppa[i], prot & vpprot, 4353 hat_flag, svd->rcookie); 4354 } 4355 } 4356 if (!(hat_flag & HAT_LOAD_LOCK)) { 4357 for (i = 0; i < pages; i++) { 4358 page_unlock(ppa[i]); 4359 } 4360 } 4361 if (amp != NULL) { 4362 anon_array_exit(&an_cookie); 4363 ANON_LOCK_EXIT(&->a_rwlock); 4364 } 4365 goto next; 4366 } 4367 4368 if (pszc == szc) { 4369 /* 4370 * segvn_full_szcpages() upgraded pages szc. 4371 */ 4372 ASSERT(pszc == ppa[0]->p_szc); 4373 ASSERT(IS_P2ALIGNED(pfn, pages)); 4374 goto chkszc; 4375 } 4376 4377 if (pszc > szc) { 4378 kmutex_t *szcmtx; 4379 SEGVN_VMSTAT_FLTVNPAGES(35); 4380 /* 4381 * p_szc of ppa[0] can change since we haven't 4382 * locked all constituent pages. Call 4383 * page_lock_szc() to prevent szc changes. 4384 * This should be a rare case that happens when 4385 * multiple segments use a different page size 4386 * to map the same file offsets. 4387 */ 4388 szcmtx = page_szc_lock(ppa[0]); 4389 pszc = ppa[0]->p_szc; 4390 ASSERT(szcmtx != NULL || pszc == 0); 4391 ASSERT(ppa[0]->p_szc <= pszc); 4392 if (pszc <= szc) { 4393 SEGVN_VMSTAT_FLTVNPAGES(36); 4394 if (szcmtx != NULL) { 4395 mutex_exit(szcmtx); 4396 } 4397 goto chkszc; 4398 } 4399 if (pplist != NULL) { 4400 /* 4401 * page got promoted since last check. 4402 * we don't need preaalocated large 4403 * page. 4404 */ 4405 SEGVN_VMSTAT_FLTVNPAGES(37); 4406 page_free_replacement_page(pplist); 4407 page_create_putback(pages); 4408 } 4409 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4410 prot, vpprot); 4411 hat_memload_array_region(hat, a, pgsz, ppa, 4412 prot & vpprot, hat_flag, svd->rcookie); 4413 mutex_exit(szcmtx); 4414 if (!(hat_flag & HAT_LOAD_LOCK)) { 4415 for (i = 0; i < pages; i++) { 4416 page_unlock(ppa[i]); 4417 } 4418 } 4419 if (amp != NULL) { 4420 anon_array_exit(&an_cookie); 4421 ANON_LOCK_EXIT(&->a_rwlock); 4422 } 4423 goto next; 4424 } 4425 4426 /* 4427 * if page got demoted since last check 4428 * we could have not allocated larger page. 4429 * allocate now. 
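 * If that allocation fails and this is not a SOFTLOCK fault we
 * give up on this page size: the failure is recorded in
 * alloc_failed and the outer loop sizes down via ierr == -1.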
4430 */ 4431 if (pplist == NULL && 4432 page_alloc_pages(vp, seg, a, &pplist, NULL, 4433 szc, 0, 0) && type != F_SOFTLOCK) { 4434 SEGVN_VMSTAT_FLTVNPAGES(38); 4435 for (i = 0; i < pages; i++) { 4436 page_unlock(ppa[i]); 4437 } 4438 if (amp != NULL) { 4439 anon_array_exit(&an_cookie); 4440 ANON_LOCK_EXIT(&amp->a_rwlock); 4441 } 4442 ierr = -1; 4443 alloc_failed |= (1 << szc); 4444 break; 4445 } 4446 4447 SEGVN_VMSTAT_FLTVNPAGES(39); 4448 4449 if (pplist != NULL) { 4450 segvn_relocate_pages(ppa, pplist); 4451 #ifdef DEBUG 4452 } else { 4453 ASSERT(type == F_SOFTLOCK); 4454 SEGVN_VMSTAT_FLTVNPAGES(40); 4455 #endif /* DEBUG */ 4456 } 4457 4458 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4459 4460 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4461 ASSERT(type == F_SOFTLOCK); 4462 for (i = 0; i < pages; i++) { 4463 ASSERT(ppa[i]->p_szc < szc); 4464 hat_memload_region(hat, 4465 a + (i << PAGESHIFT), 4466 ppa[i], prot & vpprot, hat_flag, 4467 svd->rcookie); 4468 } 4469 } else { 4470 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4471 hat_memload_array_region(hat, a, pgsz, ppa, 4472 prot & vpprot, hat_flag, svd->rcookie); 4473 } 4474 if (!(hat_flag & HAT_LOAD_LOCK)) { 4475 for (i = 0; i < pages; i++) { 4476 ASSERT(PAGE_SHARED(ppa[i])); 4477 page_unlock(ppa[i]); 4478 } 4479 } 4480 if (amp != NULL) { 4481 anon_array_exit(&an_cookie); 4482 ANON_LOCK_EXIT(&amp->a_rwlock); 4483 } 4484 4485 next: 4486 if (vpage != NULL) { 4487 vpage += pages; 4488 } 4489 adjszc_chk = 1; 4490 } 4491 if (a == lpgeaddr) 4492 break; 4493 ASSERT(a < lpgeaddr); 4494 4495 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4496 4497 /* 4498 * ierr == -1 means we failed to map with a large page 4499 * (either due to allocation/relocation failures or 4500 * misalignment with other mappings to this file). 4501 * 4502 * ierr == -2 means some other thread allocated a large page 4503 * after we gave up on mapping with a large page. Retry with 4504 * a larger mapping. 4505 */ 4506 ASSERT(ierr == -1 || ierr == -2); 4507 ASSERT(ierr == -2 || szc != 0); 4508 ASSERT(ierr == -1 || szc < seg->s_szc); 4509 if (ierr == -2) { 4510 SEGVN_VMSTAT_FLTVNPAGES(41); 4511 ASSERT(pszc > szc && pszc <= seg->s_szc); 4512 szc = pszc; 4513 } else if (segvn_anypgsz_vnode) { 4514 SEGVN_VMSTAT_FLTVNPAGES(42); 4515 szc--; 4516 } else { 4517 SEGVN_VMSTAT_FLTVNPAGES(43); 4518 ASSERT(pszc < szc); 4519 /* 4520 * Some other process created a pszc large page, 4521 * but we still have to drop to szc 0. 4522 */ 4523 szc = 0; 4524 } 4525 4526 pgsz = page_get_pagesize(szc); 4527 pages = btop(pgsz); 4528 if (ierr == -2) { 4529 /* 4530 * Size up case. Note lpgaddr may only be needed for 4531 * softlock case so we don't adjust it here. 4532 */ 4533 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4534 ASSERT(a >= lpgaddr); 4535 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4536 off = svd->offset + (uintptr_t)(a - seg->s_base); 4537 aindx = svd->anon_index + seg_page(seg, a); 4538 vpage = (svd->vpage != NULL) ? 4539 &svd->vpage[seg_page(seg, a)] : NULL; 4540 } else { 4541 /* 4542 * Size down case. Note lpgaddr may only be needed for 4543 * softlock case so we don't adjust it here. 4544 */ 4545 ASSERT(IS_P2ALIGNED(a, pgsz)); 4546 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4547 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4548 ASSERT(a < lpgeaddr); 4549 if (a < addr) { 4550 SEGVN_VMSTAT_FLTVNPAGES(44); 4551 /* 4552 * The beginning of the large page region can 4553 * be pulled to the right to make a smaller 4554 * region. We haven't yet faulted a single 4555 * page.
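 * This path is never taken for SOFTLOCK or COW faults (see the
 * ASSERT above the retry code), so shrinking the region here
 * cannot strand any soft-locked pages.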
4556 */ 4557 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4558 ASSERT(a >= lpgaddr); 4559 off = svd->offset + 4560 (uintptr_t)(a - seg->s_base); 4561 aindx = svd->anon_index + seg_page(seg, a); 4562 vpage = (svd->vpage != NULL) ? 4563 &svd->vpage[seg_page(seg, a)] : NULL; 4564 } 4565 } 4566 } 4567 out: 4568 kmem_free(ppa, ppasize); 4569 if (!err && !vop_size_err) { 4570 SEGVN_VMSTAT_FLTVNPAGES(45); 4571 return (0); 4572 } 4573 if (type == F_SOFTLOCK && a > lpgaddr) { 4574 SEGVN_VMSTAT_FLTVNPAGES(46); 4575 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4576 } 4577 if (!vop_size_err) { 4578 SEGVN_VMSTAT_FLTVNPAGES(47); 4579 return (err); 4580 } 4581 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4582 /* 4583 * Large page end is mapped beyond the end of file and it's a cow 4584 * fault (can be a text replication induced cow) or softlock so we can't 4585 * reduce the map area. For now just demote the segment. This should 4586 * really only happen if the end of the file changed after the mapping 4587 * was established since when large page segments are created we make 4588 * sure they don't extend beyond the end of the file. 4589 */ 4590 SEGVN_VMSTAT_FLTVNPAGES(48); 4591 4592 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4593 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4594 err = 0; 4595 if (seg->s_szc != 0) { 4596 segvn_fltvnpages_clrszc_cnt++; 4597 ASSERT(svd->softlockcnt == 0); 4598 err = segvn_clrszc(seg); 4599 if (err != 0) { 4600 segvn_fltvnpages_clrszc_err++; 4601 } 4602 } 4603 ASSERT(err || seg->s_szc == 0); 4604 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4605 /* segvn_fault will do its job as if szc had been zero to begin with */ 4606 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4607 } 4608 4609 /* 4610 * This routine will attempt to fault in one large page. 4611 * it will use smaller pages if that fails. 4612 * It should only be called for pure anonymous segments. 4613 */ 4614 static faultcode_t 4615 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4616 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4617 caddr_t eaddr, int brkcow) 4618 { 4619 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4620 struct anon_map *amp = svd->amp; 4621 uchar_t segtype = svd->type; 4622 uint_t szc = seg->s_szc; 4623 size_t pgsz = page_get_pagesize(szc); 4624 size_t maxpgsz = pgsz; 4625 pgcnt_t pages = btop(pgsz); 4626 uint_t ppaszc = szc; 4627 caddr_t a = lpgaddr; 4628 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4629 struct vpage *vpage = (svd->vpage != NULL) ? 4630 &svd->vpage[seg_page(seg, a)] : NULL; 4631 page_t **ppa; 4632 uint_t ppa_szc; 4633 faultcode_t err; 4634 int ierr; 4635 uint_t protchk, prot, vpprot; 4636 ulong_t i; 4637 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4638 anon_sync_obj_t cookie; 4639 int adjszc_chk; 4640 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4641 4642 ASSERT(szc != 0); 4643 ASSERT(amp != NULL); 4644 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4645 ASSERT(!(svd->flags & MAP_NORESERVE)); 4646 ASSERT(type != F_SOFTUNLOCK); 4647 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4648 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4649 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4650 4651 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4652 4653 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4654 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4655 4656 if (svd->flags & MAP_TEXT) { 4657 hat_flag |= HAT_LOAD_TEXT; 4658 } 4659 4660 if (svd->pageprot) { 4661 switch (rw) { 4662 case S_READ: 4663 protchk = PROT_READ; 4664 break; 4665 case S_WRITE: 4666 protchk = PROT_WRITE; 4667 break; 4668 case S_EXEC: 4669 protchk = PROT_EXEC; 4670 break; 4671 case S_OTHER: 4672 default: 4673 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4674 break; 4675 } 4676 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4677 } else { 4678 prot = svd->prot; 4679 /* caller has already done segment level protection check. */ 4680 } 4681 4682 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4683 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4684 for (;;) { 4685 adjszc_chk = 0; 4686 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4687 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4688 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4689 ASSERT(vpage != NULL); 4690 prot = VPP_PROT(vpage); 4691 ASSERT(sameprot(seg, a, maxpgsz)); 4692 if ((prot & protchk) == 0) { 4693 err = FC_PROT; 4694 goto error; 4695 } 4696 } 4697 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4698 pgsz < maxpgsz) { 4699 ASSERT(a > lpgaddr); 4700 szc = seg->s_szc; 4701 pgsz = maxpgsz; 4702 pages = btop(pgsz); 4703 ASSERT(IS_P2ALIGNED(aindx, pages)); 4704 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4705 pgsz); 4706 } 4707 if (type == F_SOFTLOCK) { 4708 atomic_add_long((ulong_t *)&svd->softlockcnt, 4709 pages); 4710 } 4711 anon_array_enter(amp, aindx, &cookie); 4712 ppa_szc = (uint_t)-1; 4713 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4714 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4715 segvn_anypgsz, pgflags, svd->cred); 4716 if (ierr != 0) { 4717 anon_array_exit(&cookie); 4718 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4719 if (type == F_SOFTLOCK) { 4720 atomic_add_long( 4721 (ulong_t *)&svd->softlockcnt, 4722 -pages); 4723 } 4724 if (ierr > 0) { 4725 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4726 err = FC_MAKE_ERR(ierr); 4727 goto error; 4728 } 4729 break; 4730 } 4731 4732 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4733 4734 ASSERT(segtype == MAP_SHARED || 4735 ppa[0]->p_szc <= szc); 4736 ASSERT(segtype == MAP_PRIVATE || 4737 ppa[0]->p_szc >= szc); 4738 4739 /* 4740 * Handle pages that have been marked for migration 4741 */ 4742 if (lgrp_optimizations()) 4743 page_migrate(seg, a, ppa, pages); 4744 4745 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4746 4747 if (segtype == MAP_SHARED) { 4748 vpprot |= PROT_WRITE; 4749 } 4750 4751 hat_memload_array(hat, a, pgsz, ppa, 4752 prot & vpprot, hat_flag); 4753 4754 if (hat_flag & HAT_LOAD_LOCK) { 4755 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4756 } else { 4757 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4758 for (i = 0; i < pages; i++) 4759 page_unlock(ppa[i]); 4760 } 4761 if (vpage != NULL) 4762 vpage += pages; 4763 4764 anon_array_exit(&cookie); 4765 adjszc_chk = 1; 4766 } 4767 if (a == lpgeaddr) 4768 break; 4769 ASSERT(a < lpgeaddr); 4770 /* 4771 * ierr == -1 means we failed to 
allocate a large page. 4772 * so do a size down operation. 4773 * 4774 * ierr == -2 means some other process that privately shares 4775 * pages with this process has allocated a larger page and we 4776 * need to retry with larger pages. So do a size up 4777 * operation. This relies on the fact that large pages are 4778 * never partially shared i.e. if we share any constituent 4779 * page of a large page with another process we must share the 4780 * entire large page. Note this cannot happen for SOFTLOCK 4781 * case, unless current address (a) is at the beginning of the 4782 * next page size boundary because the other process couldn't 4783 * have relocated locked pages. 4784 */ 4785 ASSERT(ierr == -1 || ierr == -2); 4786 4787 if (segvn_anypgsz) { 4788 ASSERT(ierr == -2 || szc != 0); 4789 ASSERT(ierr == -1 || szc < seg->s_szc); 4790 szc = (ierr == -1) ? szc - 1 : szc + 1; 4791 } else { 4792 /* 4793 * For non COW faults and segvn_anypgsz == 0 4794 * we need to be careful not to loop forever 4795 * if existing page is found with szc other 4796 * than 0 or seg->s_szc. This could be due 4797 * to page relocations on behalf of DR or 4798 * more likely large page creation. For this 4799 * case simply re-size to existing page's szc 4800 * if returned by anon_map_getpages(). 4801 */ 4802 if (ppa_szc == (uint_t)-1) { 4803 szc = (ierr == -1) ? 0 : seg->s_szc; 4804 } else { 4805 ASSERT(ppa_szc <= seg->s_szc); 4806 ASSERT(ierr == -2 || ppa_szc < szc); 4807 ASSERT(ierr == -1 || ppa_szc > szc); 4808 szc = ppa_szc; 4809 } 4810 } 4811 4812 pgsz = page_get_pagesize(szc); 4813 pages = btop(pgsz); 4814 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4815 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4816 if (type == F_SOFTLOCK) { 4817 /* 4818 * For softlocks we cannot reduce the fault area 4819 * (calculated based on the largest page size for this 4820 * segment) for size down and a is already next 4821 * page size aligned as assertted above for size 4822 * ups. Therefore just continue in case of softlock. 4823 */ 4824 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4825 continue; /* keep lint happy */ 4826 } else if (ierr == -2) { 4827 4828 /* 4829 * Size up case. Note lpgaddr may only be needed for 4830 * softlock case so we don't adjust it here. 4831 */ 4832 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4833 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4834 ASSERT(a >= lpgaddr); 4835 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4836 aindx = svd->anon_index + seg_page(seg, a); 4837 vpage = (svd->vpage != NULL) ? 4838 &svd->vpage[seg_page(seg, a)] : NULL; 4839 } else { 4840 /* 4841 * Size down case. Note lpgaddr may only be needed for 4842 * softlock case so we don't adjust it here. 4843 */ 4844 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4845 ASSERT(IS_P2ALIGNED(a, pgsz)); 4846 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4847 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4848 ASSERT(a < lpgeaddr); 4849 if (a < addr) { 4850 /* 4851 * The beginning of the large page region can 4852 * be pulled to the right to make a smaller 4853 * region. We haven't yet faulted a single 4854 * page. 4855 */ 4856 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4857 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4858 ASSERT(a >= lpgaddr); 4859 aindx = svd->anon_index + seg_page(seg, a); 4860 vpage = (svd->vpage != NULL) ? 
4861 &svd->vpage[seg_page(seg, a)] : NULL; 4862 } 4863 } 4864 } 4865 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4866 ANON_LOCK_EXIT(&->a_rwlock); 4867 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4868 return (0); 4869 error: 4870 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4871 ANON_LOCK_EXIT(&->a_rwlock); 4872 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4873 if (type == F_SOFTLOCK && a > lpgaddr) { 4874 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4875 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4876 } 4877 return (err); 4878 } 4879 4880 int fltadvice = 1; /* set to free behind pages for sequential access */ 4881 4882 /* 4883 * This routine is called via a machine specific fault handling routine. 4884 * It is also called by software routines wishing to lock or unlock 4885 * a range of addresses. 4886 * 4887 * Here is the basic algorithm: 4888 * If unlocking 4889 * Call segvn_softunlock 4890 * Return 4891 * endif 4892 * Checking and set up work 4893 * If we will need some non-anonymous pages 4894 * Call VOP_GETPAGE over the range of non-anonymous pages 4895 * endif 4896 * Loop over all addresses requested 4897 * Call segvn_faultpage passing in page list 4898 * to load up translations and handle anonymous pages 4899 * endloop 4900 * Load up translation to any additional pages in page list not 4901 * already handled that fit into this segment 4902 */ 4903 static faultcode_t 4904 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4905 enum fault_type type, enum seg_rw rw) 4906 { 4907 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4908 page_t **plp, **ppp, *pp; 4909 u_offset_t off; 4910 caddr_t a; 4911 struct vpage *vpage; 4912 uint_t vpprot, prot; 4913 int err; 4914 page_t *pl[PVN_GETPAGE_NUM + 1]; 4915 size_t plsz, pl_alloc_sz; 4916 size_t page; 4917 ulong_t anon_index; 4918 struct anon_map *amp; 4919 int dogetpage = 0; 4920 caddr_t lpgaddr, lpgeaddr; 4921 size_t pgsz; 4922 anon_sync_obj_t cookie; 4923 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4924 4925 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4926 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4927 4928 /* 4929 * First handle the easy stuff 4930 */ 4931 if (type == F_SOFTUNLOCK) { 4932 if (rw == S_READ_NOCOW) { 4933 rw = S_READ; 4934 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4935 } 4936 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4937 pgsz = (seg->s_szc == 0) ? 
PAGESIZE : 4938 page_get_pagesize(seg->s_szc); 4939 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4940 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4941 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4942 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4943 return (0); 4944 } 4945 4946 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4947 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4948 if (brkcow == 0) { 4949 if (svd->tr_state == SEGVN_TR_INIT) { 4950 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4951 if (svd->tr_state == SEGVN_TR_INIT) { 4952 ASSERT(svd->vp != NULL && svd->amp == NULL); 4953 ASSERT(svd->flags & MAP_TEXT); 4954 ASSERT(svd->type == MAP_PRIVATE); 4955 segvn_textrepl(seg); 4956 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4957 ASSERT(svd->tr_state != SEGVN_TR_ON || 4958 svd->amp != NULL); 4959 } 4960 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4961 } 4962 } else if (svd->tr_state != SEGVN_TR_OFF) { 4963 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4964 4965 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4966 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4967 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4968 return (FC_PROT); 4969 } 4970 4971 if (svd->tr_state == SEGVN_TR_ON) { 4972 ASSERT(svd->vp != NULL && svd->amp != NULL); 4973 segvn_textunrepl(seg, 0); 4974 ASSERT(svd->amp == NULL && 4975 svd->tr_state == SEGVN_TR_OFF); 4976 } else if (svd->tr_state != SEGVN_TR_OFF) { 4977 svd->tr_state = SEGVN_TR_OFF; 4978 } 4979 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4980 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4981 } 4982 4983 top: 4984 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4985 4986 /* 4987 * If we have the same protections for the entire segment, 4988 * insure that the access being attempted is legitimate. 4989 */ 4990 4991 if (svd->pageprot == 0) { 4992 uint_t protchk; 4993 4994 switch (rw) { 4995 case S_READ: 4996 case S_READ_NOCOW: 4997 protchk = PROT_READ; 4998 break; 4999 case S_WRITE: 5000 protchk = PROT_WRITE; 5001 break; 5002 case S_EXEC: 5003 protchk = PROT_EXEC; 5004 break; 5005 case S_OTHER: 5006 default: 5007 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 5008 break; 5009 } 5010 5011 if ((svd->prot & protchk) == 0) { 5012 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5013 return (FC_PROT); /* illegal access type */ 5014 } 5015 } 5016 5017 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5018 /* this must be SOFTLOCK S_READ fault */ 5019 ASSERT(svd->amp == NULL); 5020 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5021 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5022 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5023 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5024 /* 5025 * this must be the first ever non S_READ_NOCOW 5026 * softlock for this segment. 5027 */ 5028 ASSERT(svd->softlockcnt == 0); 5029 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5030 HAT_REGION_TEXT); 5031 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5032 } 5033 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5034 goto top; 5035 } 5036 5037 /* 5038 * We can't allow the long term use of softlocks for vmpss segments, 5039 * because in some file truncation cases we should be able to demote 5040 * the segment, which requires that there are no softlocks. The 5041 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5042 * segment is S_READ_NOCOW, where the caller holds the address space 5043 * locked as writer and calls softunlock before dropping the as lock. 5044 * S_READ_NOCOW is used by /proc to read memory from another user. 
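 * (A vmpss segment here means a vnode backed segment that is
 * mapped with large pages.)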
5045 * 5046 * Another deadlock between SOFTLOCK and file truncation can happen 5047 * because segvn_fault_vnodepages() calls the FS one pagesize at 5048 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5049 * can cause a deadlock because the first set of page_t's remain 5050 * locked SE_SHARED. To avoid this, we demote segments on a first 5051 * SOFTLOCK if they have a length greater than the segment's 5052 * page size. 5053 * 5054 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5055 * the access type is S_READ_NOCOW and the fault length is less than 5056 * or equal to the segment's page size. While this is quite restrictive, 5057 * it should be the most common case of SOFTLOCK against a vmpss 5058 * segment. 5059 * 5060 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5061 * caller makes sure no COW will be caused by another thread for a 5062 * softlocked page. 5063 */ 5064 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5065 int demote = 0; 5066 5067 if (rw != S_READ_NOCOW) { 5068 demote = 1; 5069 } 5070 if (!demote && len > PAGESIZE) { 5071 pgsz = page_get_pagesize(seg->s_szc); 5072 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5073 lpgeaddr); 5074 if (lpgeaddr - lpgaddr > pgsz) { 5075 demote = 1; 5076 } 5077 } 5078 5079 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5080 5081 if (demote) { 5082 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5083 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5084 if (seg->s_szc != 0) { 5085 segvn_vmpss_clrszc_cnt++; 5086 ASSERT(svd->softlockcnt == 0); 5087 err = segvn_clrszc(seg); 5088 if (err) { 5089 segvn_vmpss_clrszc_err++; 5090 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5091 return (FC_MAKE_ERR(err)); 5092 } 5093 } 5094 ASSERT(seg->s_szc == 0); 5095 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5096 goto top; 5097 } 5098 } 5099 5100 /* 5101 * Check to see if we need to allocate an anon_map structure. 5102 */ 5103 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5104 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5105 /* 5106 * Drop the "read" lock on the segment and acquire 5107 * the "write" version since we have to allocate the 5108 * anon_map. 5109 */ 5110 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5111 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5112 5113 if (svd->amp == NULL) { 5114 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5115 svd->amp->a_szc = seg->s_szc; 5116 } 5117 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5118 5119 /* 5120 * Start all over again since segment protections 5121 * may have changed after we dropped the "read" lock. 5122 */ 5123 goto top; 5124 } 5125 5126 /* 5127 * S_READ_NOCOW vs S_READ distinction was 5128 * only needed for the code above. After 5129 * that we treat it as S_READ. 5130 */ 5131 if (rw == S_READ_NOCOW) { 5132 ASSERT(type == F_SOFTLOCK); 5133 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5134 rw = S_READ; 5135 } 5136 5137 amp = svd->amp; 5138 5139 /* 5140 * MADV_SEQUENTIAL work is ignored for large page segments. 
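 * Large page segments are handed off to segvn_fault_anonpages()
 * or segvn_fault_vnodepages() below and return from there; the
 * rest of this routine only deals with PAGESIZE mappings.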
5141 */ 5142 if (seg->s_szc != 0) { 5143 pgsz = page_get_pagesize(seg->s_szc); 5144 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5145 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5146 if (svd->vp == NULL) { 5147 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5148 lpgeaddr, type, rw, addr, addr + len, brkcow); 5149 } else { 5150 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5151 lpgeaddr, type, rw, addr, addr + len, brkcow); 5152 if (err == IE_RETRY) { 5153 ASSERT(seg->s_szc == 0); 5154 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5155 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5156 goto top; 5157 } 5158 } 5159 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5160 return (err); 5161 } 5162 5163 page = seg_page(seg, addr); 5164 if (amp != NULL) { 5165 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5166 anon_index = svd->anon_index + page; 5167 5168 if (type == F_PROT && rw == S_READ && 5169 svd->tr_state == SEGVN_TR_OFF && 5170 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5171 size_t index = anon_index; 5172 struct anon *ap; 5173 5174 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5175 /* 5176 * The fast path could apply to S_WRITE also, except 5177 * that the protection fault could be caused by lazy 5178 * tlb flush when ro->rw. In this case, the pte is 5179 * RW already. But RO in the other cpu's tlb causes 5180 * the fault. Since hat_chgprot won't do anything if 5181 * pte doesn't change, we may end up faulting 5182 * indefinitely until the RO tlb entry gets replaced. 5183 */ 5184 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5185 anon_array_enter(amp, index, &cookie); 5186 ap = anon_get_ptr(amp->ahp, index); 5187 anon_array_exit(&cookie); 5188 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5189 ANON_LOCK_EXIT(&amp->a_rwlock); 5190 goto slow; 5191 } 5192 } 5193 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5194 ANON_LOCK_EXIT(&amp->a_rwlock); 5195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5196 return (0); 5197 } 5198 } 5199 slow: 5200 5201 if (svd->vpage == NULL) 5202 vpage = NULL; 5203 else 5204 vpage = &svd->vpage[page]; 5205 5206 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5207 5208 /* 5209 * If MADV_SEQUENTIAL has been set for the particular page we 5210 * are faulting on, free behind all pages in the segment and put 5211 * them on the free list.
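 * Free behind is only attempted past the first page of the
 * segment, when fltadvice is set and text replication is not
 * active for this segment.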
5212 */ 5213 5214 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5215 struct vpage *vpp; 5216 ulong_t fanon_index; 5217 size_t fpage; 5218 u_offset_t pgoff, fpgoff; 5219 struct vnode *fvp; 5220 struct anon *fap = NULL; 5221 5222 if (svd->advice == MADV_SEQUENTIAL || 5223 (svd->pageadvice && 5224 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5225 pgoff = off - PAGESIZE; 5226 fpage = page - 1; 5227 if (vpage != NULL) 5228 vpp = &svd->vpage[fpage]; 5229 if (amp != NULL) 5230 fanon_index = svd->anon_index + fpage; 5231 5232 while (pgoff > svd->offset) { 5233 if (svd->advice != MADV_SEQUENTIAL && 5234 (!svd->pageadvice || (vpage && 5235 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5236 break; 5237 5238 /* 5239 * If this is an anon page, we must find the 5240 * correct <vp, offset> for it 5241 */ 5242 fap = NULL; 5243 if (amp != NULL) { 5244 ANON_LOCK_ENTER(&->a_rwlock, 5245 RW_READER); 5246 anon_array_enter(amp, fanon_index, 5247 &cookie); 5248 fap = anon_get_ptr(amp->ahp, 5249 fanon_index); 5250 if (fap != NULL) { 5251 swap_xlate(fap, &fvp, &fpgoff); 5252 } else { 5253 fpgoff = pgoff; 5254 fvp = svd->vp; 5255 } 5256 anon_array_exit(&cookie); 5257 ANON_LOCK_EXIT(&->a_rwlock); 5258 } else { 5259 fpgoff = pgoff; 5260 fvp = svd->vp; 5261 } 5262 if (fvp == NULL) 5263 break; /* XXX */ 5264 /* 5265 * Skip pages that are free or have an 5266 * "exclusive" lock. 5267 */ 5268 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5269 if (pp == NULL) 5270 break; 5271 /* 5272 * We don't need the page_struct_lock to test 5273 * as this is only advisory; even if we 5274 * acquire it someone might race in and lock 5275 * the page after we unlock and before the 5276 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5277 */ 5278 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5279 /* 5280 * Hold the vnode before releasing 5281 * the page lock to prevent it from 5282 * being freed and re-used by some 5283 * other thread. 5284 */ 5285 VN_HOLD(fvp); 5286 page_unlock(pp); 5287 /* 5288 * We should build a page list 5289 * to kluster putpages XXX 5290 */ 5291 (void) VOP_PUTPAGE(fvp, 5292 (offset_t)fpgoff, PAGESIZE, 5293 (B_DONTNEED|B_FREE|B_ASYNC), 5294 svd->cred, NULL); 5295 VN_RELE(fvp); 5296 } else { 5297 /* 5298 * XXX - Should the loop terminate if 5299 * the page is `locked'? 5300 */ 5301 page_unlock(pp); 5302 } 5303 --vpp; 5304 --fanon_index; 5305 pgoff -= PAGESIZE; 5306 } 5307 } 5308 } 5309 5310 plp = pl; 5311 *plp = NULL; 5312 pl_alloc_sz = 0; 5313 5314 /* 5315 * See if we need to call VOP_GETPAGE for 5316 * *any* of the range being faulted on. 5317 * We can skip all of this work if there 5318 * was no original vnode. 5319 */ 5320 if (svd->vp != NULL) { 5321 u_offset_t vp_off; 5322 size_t vp_len; 5323 struct anon *ap; 5324 vnode_t *vp; 5325 5326 vp_off = off; 5327 vp_len = len; 5328 5329 if (amp == NULL) 5330 dogetpage = 1; 5331 else { 5332 /* 5333 * Only acquire reader lock to prevent amp->ahp 5334 * from being changed. 
It's ok to miss pages, 5335 * hence we don't do anon_array_enter 5336 */ 5337 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5338 ap = anon_get_ptr(amp->ahp, anon_index); 5339 5340 if (len <= PAGESIZE) 5341 /* inline non_anon() */ 5342 dogetpage = (ap == NULL); 5343 else 5344 dogetpage = non_anon(amp->ahp, anon_index, 5345 &vp_off, &vp_len); 5346 ANON_LOCK_EXIT(&->a_rwlock); 5347 } 5348 5349 if (dogetpage) { 5350 enum seg_rw arw; 5351 struct as *as = seg->s_as; 5352 5353 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5354 /* 5355 * Page list won't fit in local array, 5356 * allocate one of the needed size. 5357 */ 5358 pl_alloc_sz = 5359 (btop(len) + 1) * sizeof (page_t *); 5360 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5361 plp[0] = NULL; 5362 plsz = len; 5363 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5364 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5365 (((size_t)(addr + PAGESIZE) < 5366 (size_t)(seg->s_base + seg->s_size)) && 5367 hat_probe(as->a_hat, addr + PAGESIZE))) { 5368 /* 5369 * Ask VOP_GETPAGE to return the exact number 5370 * of pages if 5371 * (a) this is a COW fault, or 5372 * (b) this is a software fault, or 5373 * (c) next page is already mapped. 5374 */ 5375 plsz = len; 5376 } else { 5377 /* 5378 * Ask VOP_GETPAGE to return adjacent pages 5379 * within the segment. 5380 */ 5381 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5382 ((seg->s_base + seg->s_size) - addr)); 5383 ASSERT((addr + plsz) <= 5384 (seg->s_base + seg->s_size)); 5385 } 5386 5387 /* 5388 * Need to get some non-anonymous pages. 5389 * We need to make only one call to GETPAGE to do 5390 * this to prevent certain deadlocking conditions 5391 * when we are doing locking. In this case 5392 * non_anon() should have picked up the smallest 5393 * range which includes all the non-anonymous 5394 * pages in the requested range. We have to 5395 * be careful regarding which rw flag to pass in 5396 * because on a private mapping, the underlying 5397 * object is never allowed to be written. 5398 */ 5399 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5400 arw = S_READ; 5401 } else { 5402 arw = rw; 5403 } 5404 vp = svd->vp; 5405 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5406 "segvn_getpage:seg %p addr %p vp %p", 5407 seg, addr, vp); 5408 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5409 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5410 svd->cred, NULL); 5411 if (err) { 5412 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5413 segvn_pagelist_rele(plp); 5414 if (pl_alloc_sz) 5415 kmem_free(plp, pl_alloc_sz); 5416 return (FC_MAKE_ERR(err)); 5417 } 5418 if (svd->type == MAP_PRIVATE) 5419 vpprot &= ~PROT_WRITE; 5420 } 5421 } 5422 5423 /* 5424 * N.B. at this time the plp array has all the needed non-anon 5425 * pages in addition to (possibly) having some adjacent pages. 5426 */ 5427 5428 /* 5429 * Always acquire the anon_array_lock to prevent 5430 * 2 threads from allocating separate anon slots for 5431 * the same "addr". 5432 * 5433 * If this is a copy-on-write fault and we don't already 5434 * have the anon_array_lock, acquire it to prevent the 5435 * fault routine from handling multiple copy-on-write faults 5436 * on the same "addr" in the same address space. 5437 * 5438 * Only one thread should deal with the fault since after 5439 * it is handled, the other threads can acquire a translation 5440 * to the newly created private page. This prevents two or 5441 * more threads from creating different private pages for the 5442 * same fault. 
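 * The anon array lock is taken per anon slot (from a hashed set
 * of locks), so faults on unrelated addresses within the same
 * amp generally do not serialize against each other.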
5443 * 5444 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5445 * to prevent deadlock between this thread and another thread 5446 * which has soft-locked this page and wants to acquire serial_lock. 5447 * ( bug 4026339 ) 5448 * 5449 * The fix for bug 4026339 becomes unnecessary when using the 5450 * locking scheme with per amp rwlock and a global set of hash 5451 * lock, anon_array_lock. If we steal a vnode page when low 5452 * on memory and upgrad the page lock through page_rename, 5453 * then the page is PAGE_HANDLED, nothing needs to be done 5454 * for this page after returning from segvn_faultpage. 5455 * 5456 * But really, the page lock should be downgraded after 5457 * the stolen page is page_rename'd. 5458 */ 5459 5460 if (amp != NULL) 5461 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5462 5463 /* 5464 * Ok, now loop over the address range and handle faults 5465 */ 5466 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5467 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5468 type, rw, brkcow); 5469 if (err) { 5470 if (amp != NULL) 5471 ANON_LOCK_EXIT(&->a_rwlock); 5472 if (type == F_SOFTLOCK && a > addr) { 5473 segvn_softunlock(seg, addr, (a - addr), 5474 S_OTHER); 5475 } 5476 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5477 segvn_pagelist_rele(plp); 5478 if (pl_alloc_sz) 5479 kmem_free(plp, pl_alloc_sz); 5480 return (err); 5481 } 5482 if (vpage) { 5483 vpage++; 5484 } else if (svd->vpage) { 5485 page = seg_page(seg, addr); 5486 vpage = &svd->vpage[++page]; 5487 } 5488 } 5489 5490 /* Didn't get pages from the underlying fs so we're done */ 5491 if (!dogetpage) 5492 goto done; 5493 5494 /* 5495 * Now handle any other pages in the list returned. 5496 * If the page can be used, load up the translations now. 5497 * Note that the for loop will only be entered if "plp" 5498 * is pointing to a non-NULL page pointer which means that 5499 * VOP_GETPAGE() was called and vpprot has been initialized. 5500 */ 5501 if (svd->pageprot == 0) 5502 prot = svd->prot & vpprot; 5503 5504 5505 /* 5506 * Large Files: diff should be unsigned value because we started 5507 * supporting > 2GB segment sizes from 2.5.1 and when a 5508 * large file of size > 2GB gets mapped to address space 5509 * the diff value can be > 2GB. 5510 */ 5511 5512 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5513 size_t diff; 5514 struct anon *ap; 5515 int anon_index; 5516 anon_sync_obj_t cookie; 5517 int hat_flag = HAT_LOAD_ADV; 5518 5519 if (svd->flags & MAP_TEXT) { 5520 hat_flag |= HAT_LOAD_TEXT; 5521 } 5522 5523 if (pp == PAGE_HANDLED) 5524 continue; 5525 5526 if (svd->tr_state != SEGVN_TR_ON && 5527 pp->p_offset >= svd->offset && 5528 pp->p_offset < svd->offset + seg->s_size) { 5529 5530 diff = pp->p_offset - svd->offset; 5531 5532 /* 5533 * Large Files: Following is the assertion 5534 * validating the above cast. 5535 */ 5536 ASSERT(svd->vp == pp->p_vnode); 5537 5538 page = btop(diff); 5539 if (svd->pageprot) 5540 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5541 5542 /* 5543 * Prevent other threads in the address space from 5544 * creating private pages (i.e., allocating anon slots) 5545 * while we are in the process of loading translations 5546 * to additional pages returned by the underlying 5547 * object. 
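 * This is why anon_array_enter() is called below before we
 * check whether an anon slot already exists for the page.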
5548 */ 5549 if (amp != NULL) { 5550 anon_index = svd->anon_index + page; 5551 anon_array_enter(amp, anon_index, &cookie); 5552 ap = anon_get_ptr(amp->ahp, anon_index); 5553 } 5554 if ((amp == NULL) || (ap == NULL)) { 5555 if (IS_VMODSORT(pp->p_vnode) || 5556 enable_mbit_wa) { 5557 if (rw == S_WRITE) 5558 hat_setmod(pp); 5559 else if (rw != S_OTHER && 5560 !hat_ismod(pp)) 5561 prot &= ~PROT_WRITE; 5562 } 5563 /* 5564 * Skip mapping read ahead pages marked 5565 * for migration, so they will get migrated 5566 * properly on fault 5567 */ 5568 ASSERT(amp == NULL || 5569 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5570 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5571 hat_memload_region(hat, 5572 seg->s_base + diff, 5573 pp, prot, hat_flag, 5574 svd->rcookie); 5575 } 5576 } 5577 if (amp != NULL) 5578 anon_array_exit(&cookie); 5579 } 5580 page_unlock(pp); 5581 } 5582 done: 5583 if (amp != NULL) 5584 ANON_LOCK_EXIT(&->a_rwlock); 5585 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5586 if (pl_alloc_sz) 5587 kmem_free(plp, pl_alloc_sz); 5588 return (0); 5589 } 5590 5591 /* 5592 * This routine is used to start I/O on pages asynchronously. XXX it will 5593 * only create PAGESIZE pages. At fault time they will be relocated into 5594 * larger pages. 5595 */ 5596 static faultcode_t 5597 segvn_faulta(struct seg *seg, caddr_t addr) 5598 { 5599 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5600 int err; 5601 struct anon_map *amp; 5602 vnode_t *vp; 5603 5604 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5605 5606 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5607 if ((amp = svd->amp) != NULL) { 5608 struct anon *ap; 5609 5610 /* 5611 * Reader lock to prevent amp->ahp from being changed. 5612 * This is advisory, it's ok to miss a page, so 5613 * we don't do anon_array_enter lock. 
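 * anon_getpage() below only brings the page in (or starts the
 * I/O); no translation is loaded here, that happens when the
 * address actually faults.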
5614 */ 5615 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5616 if ((ap = anon_get_ptr(amp->ahp, 5617 svd->anon_index + seg_page(seg, addr))) != NULL) { 5618 5619 err = anon_getpage(&ap, NULL, NULL, 5620 0, seg, addr, S_READ, svd->cred); 5621 5622 ANON_LOCK_EXIT(&amp->a_rwlock); 5623 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5624 if (err) 5625 return (FC_MAKE_ERR(err)); 5626 return (0); 5627 } 5628 ANON_LOCK_EXIT(&amp->a_rwlock); 5629 } 5630 5631 if (svd->vp == NULL) { 5632 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5633 return (0); /* zfod page - do nothing now */ 5634 } 5635 5636 vp = svd->vp; 5637 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5638 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5639 err = VOP_GETPAGE(vp, 5640 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5641 PAGESIZE, NULL, NULL, 0, seg, addr, 5642 S_OTHER, svd->cred, NULL); 5643 5644 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5645 if (err) 5646 return (FC_MAKE_ERR(err)); 5647 return (0); 5648 } 5649 5650 static int 5651 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5652 { 5653 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5654 struct vpage *cvp, *svp, *evp; 5655 struct vnode *vp; 5656 size_t pgsz; 5657 pgcnt_t pgcnt; 5658 anon_sync_obj_t cookie; 5659 int unload_done = 0; 5660 5661 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5662 5663 if ((svd->maxprot & prot) != prot) 5664 return (EACCES); /* violated maxprot */ 5665 5666 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5667 5668 /* return if prot is the same */ 5669 if (!svd->pageprot && svd->prot == prot) { 5670 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5671 return (0); 5672 } 5673 5674 /* 5675 * Since we change protections we first have to flush the cache. 5676 * This makes sure all the pagelock calls have to recheck 5677 * protections. 5678 */ 5679 if (svd->softlockcnt > 0) { 5680 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5681 5682 /* 5683 * If this is a shared segment, a nonzero softlockcnt 5684 * means locked pages are still in use. 5685 */ 5686 if (svd->type == MAP_SHARED) { 5687 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5688 return (EAGAIN); 5689 } 5690 5691 /* 5692 * Since we do have the segvn writers lock nobody can fill 5693 * the cache with entries belonging to this seg during 5694 * the purge. The flush either succeeds or we still have 5695 * pending I/Os.
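 * If softlockcnt is still nonzero after the purge we return
 * EAGAIN so the caller can retry later.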
5696 */ 5697 segvn_purge(seg); 5698 if (svd->softlockcnt > 0) { 5699 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5700 return (EAGAIN); 5701 } 5702 } 5703 5704 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5705 ASSERT(svd->amp == NULL); 5706 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5707 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5708 HAT_REGION_TEXT); 5709 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5710 unload_done = 1; 5711 } else if (svd->tr_state == SEGVN_TR_INIT) { 5712 svd->tr_state = SEGVN_TR_OFF; 5713 } else if (svd->tr_state == SEGVN_TR_ON) { 5714 ASSERT(svd->amp != NULL); 5715 segvn_textunrepl(seg, 0); 5716 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5717 unload_done = 1; 5718 } 5719 5720 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5721 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5722 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5723 segvn_inval_trcache(svd->vp); 5724 } 5725 if (seg->s_szc != 0) { 5726 int err; 5727 pgsz = page_get_pagesize(seg->s_szc); 5728 pgcnt = pgsz >> PAGESHIFT; 5729 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5730 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5731 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5732 ASSERT(seg->s_base != addr || seg->s_size != len); 5733 /* 5734 * If we are holding the as lock as a reader then 5735 * we need to return IE_RETRY and let the as 5736 * layer drop and re-acquire the lock as a writer. 5737 */ 5738 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5739 return (IE_RETRY); 5740 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5741 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5742 err = segvn_demote_range(seg, addr, len, 5743 SDR_END, 0); 5744 } else { 5745 uint_t szcvec = map_pgszcvec(seg->s_base, 5746 pgsz, (uintptr_t)seg->s_base, 5747 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5748 err = segvn_demote_range(seg, addr, len, 5749 SDR_END, szcvec); 5750 } 5751 if (err == 0) 5752 return (IE_RETRY); 5753 if (err == ENOMEM) 5754 return (IE_NOMEM); 5755 return (err); 5756 } 5757 } 5758 5759 5760 /* 5761 * If it's a private mapping and we're making it writable then we 5762 * may have to reserve the additional swap space now. If we are 5763 * making writable only a part of the segment then we use its vpage 5764 * array to keep a record of the pages for which we have reserved 5765 * swap. In this case we set the pageswap field in the segment's 5766 * segvn structure to record this. 5767 * 5768 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5769 * removing write permission on the entire segment and we haven't 5770 * modified any pages, we can release the swap space. 5771 */ 5772 if (svd->type == MAP_PRIVATE) { 5773 if (prot & PROT_WRITE) { 5774 if (!(svd->flags & MAP_NORESERVE) && 5775 !(svd->swresv && svd->pageswap == 0)) { 5776 size_t sz = 0; 5777 5778 /* 5779 * Start by determining how much swap 5780 * space is required. 5781 */ 5782 if (addr == seg->s_base && 5783 len == seg->s_size && 5784 svd->pageswap == 0) { 5785 /* The whole segment */ 5786 sz = seg->s_size; 5787 } else { 5788 /* 5789 * Make sure that the vpage array 5790 * exists, and make a note of the 5791 * range of elements corresponding 5792 * to len. 
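 * The per-page swap reservations made here are remembered with
 * the VPP_*SWAPRES bits in these vpage entries.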
5793 */ 5794 segvn_vpage(seg); 5795 if (svd->vpage == NULL) { 5796 SEGVN_LOCK_EXIT(seg->s_as, 5797 &svd->lock); 5798 return (ENOMEM); 5799 } 5800 svp = &svd->vpage[seg_page(seg, addr)]; 5801 evp = &svd->vpage[seg_page(seg, 5802 addr + len)]; 5803 5804 if (svd->pageswap == 0) { 5805 /* 5806 * This is the first time we've 5807 * asked for a part of this 5808 * segment, so we need to 5809 * reserve everything we've 5810 * been asked for. 5811 */ 5812 sz = len; 5813 } else { 5814 /* 5815 * We have to count the number 5816 * of pages required. 5817 */ 5818 for (cvp = svp; cvp < evp; 5819 cvp++) { 5820 if (!VPP_ISSWAPRES(cvp)) 5821 sz++; 5822 } 5823 sz <<= PAGESHIFT; 5824 } 5825 } 5826 5827 /* Try to reserve the necessary swap. */ 5828 if (anon_resv_zone(sz, 5829 seg->s_as->a_proc->p_zone) == 0) { 5830 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5831 return (IE_NOMEM); 5832 } 5833 5834 /* 5835 * Make a note of how much swap space 5836 * we've reserved. 5837 */ 5838 if (svd->pageswap == 0 && sz == seg->s_size) { 5839 svd->swresv = sz; 5840 } else { 5841 ASSERT(svd->vpage != NULL); 5842 svd->swresv += sz; 5843 svd->pageswap = 1; 5844 for (cvp = svp; cvp < evp; cvp++) { 5845 if (!VPP_ISSWAPRES(cvp)) 5846 VPP_SETSWAPRES(cvp); 5847 } 5848 } 5849 } 5850 } else { 5851 /* 5852 * Swap space is released only if this segment 5853 * does not map anonymous memory, since read faults 5854 * on such segments still need an anon slot to read 5855 * in the data. 5856 */ 5857 if (svd->swresv != 0 && svd->vp != NULL && 5858 svd->amp == NULL && addr == seg->s_base && 5859 len == seg->s_size && svd->pageprot == 0) { 5860 ASSERT(svd->pageswap == 0); 5861 anon_unresv_zone(svd->swresv, 5862 seg->s_as->a_proc->p_zone); 5863 svd->swresv = 0; 5864 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5865 "anon proc:%p %lu %u", seg, 0, 0); 5866 } 5867 } 5868 } 5869 5870 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5871 if (svd->prot == prot) { 5872 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5873 return (0); /* all done */ 5874 } 5875 svd->prot = (uchar_t)prot; 5876 } else if (svd->type == MAP_PRIVATE) { 5877 struct anon *ap = NULL; 5878 page_t *pp; 5879 u_offset_t offset, off; 5880 struct anon_map *amp; 5881 ulong_t anon_idx = 0; 5882 5883 /* 5884 * A vpage structure exists or else the change does not 5885 * involve the entire segment. Establish a vpage structure 5886 * if none is there. Then, for each page in the range, 5887 * adjust its individual permissions. Note that write- 5888 * enabling a MAP_PRIVATE page can affect the claims for 5889 * locked down memory. Overcommitting memory terminates 5890 * the operation. 5891 */ 5892 segvn_vpage(seg); 5893 if (svd->vpage == NULL) { 5894 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5895 return (ENOMEM); 5896 } 5897 svd->pageprot = 1; 5898 if ((amp = svd->amp) != NULL) { 5899 anon_idx = svd->anon_index + seg_page(seg, addr); 5900 ASSERT(seg->s_szc == 0 || 5901 IS_P2ALIGNED(anon_idx, pgcnt)); 5902 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5903 } 5904 5905 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5906 evp = &svd->vpage[seg_page(seg, addr + len)]; 5907 5908 /* 5909 * See Statement at the beginning of segvn_lockop regarding 5910 * the way cowcnts and lckcnts are handled. 
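 * When write permission changes on a locked page below,
 * page_addclaim()/page_subclaim() move the claim between lckcnt
 * and cowcnt; if that would overcommit locked memory the claim
 * fails and the loop terminates early.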
5911 */ 5912 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5913 5914 if (seg->s_szc != 0) { 5915 if (amp != NULL) { 5916 anon_array_enter(amp, anon_idx, 5917 &cookie); 5918 } 5919 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5920 !segvn_claim_pages(seg, svp, offset, 5921 anon_idx, prot)) { 5922 if (amp != NULL) { 5923 anon_array_exit(&cookie); 5924 } 5925 break; 5926 } 5927 if (amp != NULL) { 5928 anon_array_exit(&cookie); 5929 } 5930 anon_idx++; 5931 } else { 5932 if (amp != NULL) { 5933 anon_array_enter(amp, anon_idx, 5934 &cookie); 5935 ap = anon_get_ptr(amp->ahp, anon_idx++); 5936 } 5937 5938 if (VPP_ISPPLOCK(svp) && 5939 VPP_PROT(svp) != prot) { 5940 5941 if (amp == NULL || ap == NULL) { 5942 vp = svd->vp; 5943 off = offset; 5944 } else 5945 swap_xlate(ap, &vp, &off); 5946 if (amp != NULL) 5947 anon_array_exit(&cookie); 5948 5949 if ((pp = page_lookup(vp, off, 5950 SE_SHARED)) == NULL) { 5951 panic("segvn_setprot: no page"); 5952 /*NOTREACHED*/ 5953 } 5954 ASSERT(seg->s_szc == 0); 5955 if ((VPP_PROT(svp) ^ prot) & 5956 PROT_WRITE) { 5957 if (prot & PROT_WRITE) { 5958 if (!page_addclaim( 5959 pp)) { 5960 page_unlock(pp); 5961 break; 5962 } 5963 } else { 5964 if (!page_subclaim( 5965 pp)) { 5966 page_unlock(pp); 5967 break; 5968 } 5969 } 5970 } 5971 page_unlock(pp); 5972 } else if (amp != NULL) 5973 anon_array_exit(&cookie); 5974 } 5975 VPP_SETPROT(svp, prot); 5976 offset += PAGESIZE; 5977 } 5978 if (amp != NULL) 5979 ANON_LOCK_EXIT(&->a_rwlock); 5980 5981 /* 5982 * Did we terminate prematurely? If so, simply unload 5983 * the translations to the things we've updated so far. 5984 */ 5985 if (svp != evp) { 5986 if (unload_done) { 5987 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5988 return (IE_NOMEM); 5989 } 5990 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5991 PAGESIZE; 5992 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5993 if (len != 0) 5994 hat_unload(seg->s_as->a_hat, addr, 5995 len, HAT_UNLOAD); 5996 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5997 return (IE_NOMEM); 5998 } 5999 } else { 6000 segvn_vpage(seg); 6001 if (svd->vpage == NULL) { 6002 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6003 return (ENOMEM); 6004 } 6005 svd->pageprot = 1; 6006 evp = &svd->vpage[seg_page(seg, addr + len)]; 6007 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 6008 VPP_SETPROT(svp, prot); 6009 } 6010 } 6011 6012 if (unload_done) { 6013 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6014 return (0); 6015 } 6016 6017 if (((prot & PROT_WRITE) != 0 && 6018 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 6019 (prot & ~PROT_USER) == PROT_NONE) { 6020 /* 6021 * Either private or shared data with write access (in 6022 * which case we need to throw out all former translations 6023 * so that we get the right translations set up on fault 6024 * and we don't allow write access to any copy-on-write pages 6025 * that might be around or to prevent write access to pages 6026 * representing holes in a file), or we don't have permission 6027 * to access the memory at all (in which case we have to 6028 * unload any current translations that might exist). 6029 */ 6030 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 6031 } else { 6032 /* 6033 * A shared mapping or a private mapping in which write 6034 * protection is going to be denied - just change all the 6035 * protections over the range of addresses in question. 6036 * segvn does not support any other attributes other 6037 * than prot so we can use hat_chgattr. 
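 *
 * A minimal userland sketch of the two paths (illustrative,
 * assuming a MAP_PRIVATE file mapping p of len bytes):
 *
 *	mprotect(p, len, PROT_READ);               -> hat_chgattr()
 *	mprotect(p, len, PROT_READ | PROT_WRITE);  -> hat_unload(),
 *	                                              so the next write
 *	                                              faults and gets a
 *	                                              private (COW) copy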
6038 */ 6039 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6040 } 6041 6042 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6043 6044 return (0); 6045 } 6046 6047 /* 6048 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 6049 * to determine if the seg is capable of mapping the requested szc. 6050 */ 6051 static int 6052 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6053 { 6054 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6055 struct segvn_data *nsvd; 6056 struct anon_map *amp = svd->amp; 6057 struct seg *nseg; 6058 caddr_t eaddr = addr + len, a; 6059 size_t pgsz = page_get_pagesize(szc); 6060 pgcnt_t pgcnt = page_get_pagecnt(szc); 6061 int err; 6062 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6063 6064 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6065 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6066 6067 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6068 return (0); 6069 } 6070 6071 /* 6072 * addr should always be pgsz aligned but eaddr may be misaligned if 6073 * it's at the end of the segment. 6074 * 6075 * XXX we should assert this condition since as_setpagesize() logic 6076 * guarantees it. 6077 */ 6078 if (!IS_P2ALIGNED(addr, pgsz) || 6079 (!IS_P2ALIGNED(eaddr, pgsz) && 6080 eaddr != seg->s_base + seg->s_size)) { 6081 6082 segvn_setpgsz_align_err++; 6083 return (EINVAL); 6084 } 6085 6086 if (amp != NULL && svd->type == MAP_SHARED) { 6087 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6088 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6089 6090 segvn_setpgsz_anon_align_err++; 6091 return (EINVAL); 6092 } 6093 } 6094 6095 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6096 szc > segvn_maxpgszc) { 6097 return (EINVAL); 6098 } 6099 6100 /* paranoid check */ 6101 if (svd->vp != NULL && 6102 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6103 return (EINVAL); 6104 } 6105 6106 if (seg->s_szc == 0 && svd->vp != NULL && 6107 map_addr_vacalign_check(addr, off)) { 6108 return (EINVAL); 6109 } 6110 6111 /* 6112 * Check that protections are the same within new page 6113 * size boundaries. 6114 */ 6115 if (svd->pageprot) { 6116 for (a = addr; a < eaddr; a += pgsz) { 6117 if ((a + pgsz) > eaddr) { 6118 if (!sameprot(seg, a, eaddr - a)) { 6119 return (EINVAL); 6120 } 6121 } else { 6122 if (!sameprot(seg, a, pgsz)) { 6123 return (EINVAL); 6124 } 6125 } 6126 } 6127 } 6128 6129 /* 6130 * Since we are changing page size we first have to flush 6131 * the cache. This makes sure all the pagelock calls have 6132 * to recheck protections. 6133 */ 6134 if (svd->softlockcnt > 0) { 6135 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6136 6137 /* 6138 * If this is shared segment non 0 softlockcnt 6139 * means locked pages are still in use. 6140 */ 6141 if (svd->type == MAP_SHARED) { 6142 return (EAGAIN); 6143 } 6144 6145 /* 6146 * Since we do have the segvn writers lock nobody can fill 6147 * the cache with entries belonging to this seg during 6148 * the purge. The flush either succeeds or we still have 6149 * pending I/Os. 
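 *
 * A minimal userland sketch of how this entry point is reached
 * (illustrative; assumes the platform supports a 4M page size):
 *
 *	struct memcntl_mha mha;
 *
 *	mha.mha_cmd = MHA_MAPSIZE_VA;
 *	mha.mha_flags = 0;
 *	mha.mha_pagesize = 4 * 1024 * 1024;
 *	(void) memcntl(addr, len, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0);
 *
 * which comes in via as_setpagesize()/SEGOP_SETPAGESIZE() as noted
 * in the function header above.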
6150 */ 6151 segvn_purge(seg); 6152 if (svd->softlockcnt > 0) { 6153 return (EAGAIN); 6154 } 6155 } 6156 6157 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6158 ASSERT(svd->amp == NULL); 6159 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6160 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6161 HAT_REGION_TEXT); 6162 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6163 } else if (svd->tr_state == SEGVN_TR_INIT) { 6164 svd->tr_state = SEGVN_TR_OFF; 6165 } else if (svd->tr_state == SEGVN_TR_ON) { 6166 ASSERT(svd->amp != NULL); 6167 segvn_textunrepl(seg, 1); 6168 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6169 amp = NULL; 6170 } 6171 6172 /* 6173 * Operation for sub range of existing segment. 6174 */ 6175 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6176 if (szc < seg->s_szc) { 6177 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6178 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6179 if (err == 0) { 6180 return (IE_RETRY); 6181 } 6182 if (err == ENOMEM) { 6183 return (IE_NOMEM); 6184 } 6185 return (err); 6186 } 6187 if (addr != seg->s_base) { 6188 nseg = segvn_split_seg(seg, addr); 6189 if (eaddr != (nseg->s_base + nseg->s_size)) { 6190 /* eaddr is szc aligned */ 6191 (void) segvn_split_seg(nseg, eaddr); 6192 } 6193 return (IE_RETRY); 6194 } 6195 if (eaddr != (seg->s_base + seg->s_size)) { 6196 /* eaddr is szc aligned */ 6197 (void) segvn_split_seg(seg, eaddr); 6198 } 6199 return (IE_RETRY); 6200 } 6201 6202 /* 6203 * Break any low level sharing and reset seg->s_szc to 0. 6204 */ 6205 if ((err = segvn_clrszc(seg)) != 0) { 6206 if (err == ENOMEM) { 6207 err = IE_NOMEM; 6208 } 6209 return (err); 6210 } 6211 ASSERT(seg->s_szc == 0); 6212 6213 /* 6214 * If the end of the current segment is not pgsz aligned 6215 * then attempt to concatenate with the next segment. 6216 */ 6217 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6218 nseg = AS_SEGNEXT(seg->s_as, seg); 6219 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6220 return (ENOMEM); 6221 } 6222 if (nseg->s_ops != &segvn_ops) { 6223 return (EINVAL); 6224 } 6225 nsvd = (struct segvn_data *)nseg->s_data; 6226 if (nsvd->softlockcnt > 0) { 6227 /* 6228 * If this is shared segment non 0 softlockcnt 6229 * means locked pages are still in use. 6230 */ 6231 if (nsvd->type == MAP_SHARED) { 6232 return (EAGAIN); 6233 } 6234 segvn_purge(nseg); 6235 if (nsvd->softlockcnt > 0) { 6236 return (EAGAIN); 6237 } 6238 } 6239 err = segvn_clrszc(nseg); 6240 if (err == ENOMEM) { 6241 err = IE_NOMEM; 6242 } 6243 if (err != 0) { 6244 return (err); 6245 } 6246 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6247 err = segvn_concat(seg, nseg, 1); 6248 if (err == -1) { 6249 return (EINVAL); 6250 } 6251 if (err == -2) { 6252 return (IE_NOMEM); 6253 } 6254 return (IE_RETRY); 6255 } 6256 6257 /* 6258 * May need to re-align anon array to 6259 * new szc. 
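 *
 * Worked example (illustrative): if the new szc gives
 * pgcnt == 8 but svd->anon_index is 13, the index is not
 * a multiple of 8, so the anon pointers are copied below
 * into a fresh anon_hdr starting at slot 0 and anon_index
 * is reset to 0, keeping future large-page anon
 * allocations pgcnt-aligned.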
6260 */ 6261 if (amp != NULL) { 6262 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6263 struct anon_hdr *nahp; 6264 6265 ASSERT(svd->type == MAP_PRIVATE); 6266 6267 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6268 ASSERT(amp->refcnt == 1); 6269 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6270 if (nahp == NULL) { 6271 ANON_LOCK_EXIT(&->a_rwlock); 6272 return (IE_NOMEM); 6273 } 6274 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6275 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6276 anon_release(nahp, btop(amp->size)); 6277 ANON_LOCK_EXIT(&->a_rwlock); 6278 return (IE_NOMEM); 6279 } 6280 anon_release(amp->ahp, btop(amp->size)); 6281 amp->ahp = nahp; 6282 svd->anon_index = 0; 6283 ANON_LOCK_EXIT(&->a_rwlock); 6284 } 6285 } 6286 if (svd->vp != NULL && szc != 0) { 6287 struct vattr va; 6288 u_offset_t eoffpage = svd->offset; 6289 va.va_mask = AT_SIZE; 6290 eoffpage += seg->s_size; 6291 eoffpage = btopr(eoffpage); 6292 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6293 segvn_setpgsz_getattr_err++; 6294 return (EINVAL); 6295 } 6296 if (btopr(va.va_size) < eoffpage) { 6297 segvn_setpgsz_eof_err++; 6298 return (EINVAL); 6299 } 6300 if (amp != NULL) { 6301 /* 6302 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6303 * don't take anon map lock here to avoid holding it 6304 * across VOP_GETPAGE() calls that may call back into 6305 * segvn for klsutering checks. We don't really need 6306 * anon map lock here since it's a private segment and 6307 * we hold as level lock as writers. 6308 */ 6309 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6310 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6311 seg->s_size, szc, svd->prot, svd->vpage, 6312 svd->cred)) != 0) { 6313 return (EINVAL); 6314 } 6315 } 6316 segvn_setvnode_mpss(svd->vp); 6317 } 6318 6319 if (amp != NULL) { 6320 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6321 if (svd->type == MAP_PRIVATE) { 6322 amp->a_szc = szc; 6323 } else if (szc > amp->a_szc) { 6324 amp->a_szc = szc; 6325 } 6326 ANON_LOCK_EXIT(&->a_rwlock); 6327 } 6328 6329 seg->s_szc = szc; 6330 6331 return (0); 6332 } 6333 6334 static int 6335 segvn_clrszc(struct seg *seg) 6336 { 6337 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6338 struct anon_map *amp = svd->amp; 6339 size_t pgsz; 6340 pgcnt_t pages; 6341 int err = 0; 6342 caddr_t a = seg->s_base; 6343 caddr_t ea = a + seg->s_size; 6344 ulong_t an_idx = svd->anon_index; 6345 vnode_t *vp = svd->vp; 6346 struct vpage *vpage = svd->vpage; 6347 page_t *anon_pl[1 + 1], *pp; 6348 struct anon *ap, *oldap; 6349 uint_t prot = svd->prot, vpprot; 6350 int pageflag = 0; 6351 6352 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6353 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6354 ASSERT(svd->softlockcnt == 0); 6355 6356 if (vp == NULL && amp == NULL) { 6357 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6358 seg->s_szc = 0; 6359 return (0); 6360 } 6361 6362 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6363 ASSERT(svd->amp == NULL); 6364 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6365 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6366 HAT_REGION_TEXT); 6367 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6368 } else if (svd->tr_state == SEGVN_TR_ON) { 6369 ASSERT(svd->amp != NULL); 6370 segvn_textunrepl(seg, 1); 6371 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6372 amp = NULL; 6373 } else { 6374 if (svd->tr_state != SEGVN_TR_OFF) { 6375 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6376 svd->tr_state = SEGVN_TR_OFF; 6377 } 6378 6379 /* 6380 * do HAT_UNLOAD_UNMAP since we are changing the pagesize. 
6381 * unload argument is 0 when we are freeing the segment 6382 * and unload was already done. 6383 */ 6384 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6385 HAT_UNLOAD_UNMAP); 6386 } 6387 6388 if (amp == NULL || svd->type == MAP_SHARED) { 6389 seg->s_szc = 0; 6390 return (0); 6391 } 6392 6393 pgsz = page_get_pagesize(seg->s_szc); 6394 pages = btop(pgsz); 6395 6396 /* 6397 * XXX anon rwlock is not really needed because this is a 6398 * private segment and we are writers. 6399 */ 6400 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6401 6402 for (; a < ea; a += pgsz, an_idx += pages) { 6403 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6404 ASSERT(vpage != NULL || svd->pageprot == 0); 6405 if (vpage != NULL) { 6406 ASSERT(sameprot(seg, a, pgsz)); 6407 prot = VPP_PROT(vpage); 6408 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6409 } 6410 if (seg->s_szc != 0) { 6411 ASSERT(vp == NULL || anon_pages(amp->ahp, 6412 an_idx, pages) == pages); 6413 if ((err = anon_map_demotepages(amp, an_idx, 6414 seg, a, prot, vpage, svd->cred)) != 0) { 6415 goto out; 6416 } 6417 } else { 6418 if (oldap->an_refcnt == 1) { 6419 continue; 6420 } 6421 if ((err = anon_getpage(&oldap, &vpprot, 6422 anon_pl, PAGESIZE, seg, a, S_READ, 6423 svd->cred))) { 6424 goto out; 6425 } 6426 if ((pp = anon_private(&ap, seg, a, prot, 6427 anon_pl[0], pageflag, svd->cred)) == NULL) { 6428 err = ENOMEM; 6429 goto out; 6430 } 6431 anon_decref(oldap); 6432 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6433 ANON_SLEEP); 6434 page_unlock(pp); 6435 } 6436 } 6437 vpage = (vpage == NULL) ? NULL : vpage + pages; 6438 } 6439 6440 amp->a_szc = 0; 6441 seg->s_szc = 0; 6442 out: 6443 ANON_LOCK_EXIT(&->a_rwlock); 6444 return (err); 6445 } 6446 6447 static int 6448 segvn_claim_pages( 6449 struct seg *seg, 6450 struct vpage *svp, 6451 u_offset_t off, 6452 ulong_t anon_idx, 6453 uint_t prot) 6454 { 6455 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6456 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6457 page_t **ppa; 6458 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6459 struct anon_map *amp = svd->amp; 6460 struct vpage *evp = svp + pgcnt; 6461 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6462 + seg->s_base; 6463 struct anon *ap; 6464 struct vnode *vp = svd->vp; 6465 page_t *pp; 6466 pgcnt_t pg_idx, i; 6467 int err = 0; 6468 anoff_t aoff; 6469 int anon = (amp != NULL) ? 
1 : 0; 6470 6471 ASSERT(svd->type == MAP_PRIVATE); 6472 ASSERT(svd->vpage != NULL); 6473 ASSERT(seg->s_szc != 0); 6474 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6475 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6476 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6477 6478 if (VPP_PROT(svp) == prot) 6479 return (1); 6480 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6481 return (1); 6482 6483 ppa = kmem_alloc(ppasize, KM_SLEEP); 6484 if (anon && vp != NULL) { 6485 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6486 anon = 0; 6487 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6488 } 6489 ASSERT(!anon || 6490 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6491 } 6492 6493 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6494 if (!VPP_ISPPLOCK(svp)) 6495 continue; 6496 if (anon) { 6497 ap = anon_get_ptr(amp->ahp, anon_idx); 6498 if (ap == NULL) { 6499 panic("segvn_claim_pages: no anon slot"); 6500 } 6501 swap_xlate(ap, &vp, &aoff); 6502 off = (u_offset_t)aoff; 6503 } 6504 ASSERT(vp != NULL); 6505 if ((pp = page_lookup(vp, 6506 (u_offset_t)off, SE_SHARED)) == NULL) { 6507 panic("segvn_claim_pages: no page"); 6508 } 6509 ppa[pg_idx++] = pp; 6510 off += PAGESIZE; 6511 } 6512 6513 if (ppa[0] == NULL) { 6514 kmem_free(ppa, ppasize); 6515 return (1); 6516 } 6517 6518 ASSERT(pg_idx <= pgcnt); 6519 ppa[pg_idx] = NULL; 6520 6521 6522 /* Find each large page within ppa, and adjust its claim */ 6523 6524 /* Does ppa cover a single large page? */ 6525 if (ppa[0]->p_szc == seg->s_szc) { 6526 if (prot & PROT_WRITE) 6527 err = page_addclaim_pages(ppa); 6528 else 6529 err = page_subclaim_pages(ppa); 6530 } else { 6531 for (i = 0; ppa[i]; i += pgcnt) { 6532 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); 6533 if (prot & PROT_WRITE) 6534 err = page_addclaim_pages(&ppa[i]); 6535 else 6536 err = page_subclaim_pages(&ppa[i]); 6537 if (err == 0) 6538 break; 6539 } 6540 } 6541 6542 for (i = 0; i < pg_idx; i++) { 6543 ASSERT(ppa[i] != NULL); 6544 page_unlock(ppa[i]); 6545 } 6546 6547 kmem_free(ppa, ppasize); 6548 return (err); 6549 } 6550 6551 /* 6552 * Returns right (upper address) segment if split occurred. 6553 * If the address is equal to the beginning or end of its segment it returns 6554 * the current segment. 
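 *
 * For illustration: splitting [s_base, s_base + s_size) at addr
 * shrinks the original segment to [s_base, addr) and returns a
 * new right-hand segment covering [addr, old end); the vpage
 * array, anon map and swap reservation are divided between the
 * two below.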
 */
static struct seg *
segvn_split_seg(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct seg *nseg;
	size_t nsize;
	struct segvn_data *nsvd;

	ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(addr >= seg->s_base);
	ASSERT(addr <= seg->s_base + seg->s_size);
	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

	if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
		return (seg);

	nsize = seg->s_base + seg->s_size - addr;
	seg->s_size = addr - seg->s_base;
	nseg = seg_alloc(seg->s_as, addr, nsize);
	ASSERT(nseg != NULL);
	nseg->s_ops = seg->s_ops;
	nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
	nseg->s_data = (void *)nsvd;
	nseg->s_szc = seg->s_szc;
	*nsvd = *svd;
	ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
	nsvd->seg = nseg;
	rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);

	if (nsvd->vp != NULL) {
		VN_HOLD(nsvd->vp);
		nsvd->offset = svd->offset +
		    (uintptr_t)(nseg->s_base - seg->s_base);
		if (nsvd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, nsvd->vp);
	} else {
		/*
		 * The offset for an anonymous segment has no significance in
		 * terms of an offset into a file. If we were to use the above
		 * calculation instead, the structures read out of
		 * /proc/<pid>/xmap would be more difficult to decipher since
		 * it would be unclear whether two seemingly contiguous
		 * prxmap_t structures represented different segments or a
		 * single segment that had been split up into multiple prxmap_t
		 * structures (e.g. if some part of the segment had not yet
		 * been faulted in).
6604 */ 6605 nsvd->offset = 0; 6606 } 6607 6608 ASSERT(svd->softlockcnt == 0); 6609 ASSERT(svd->softlockcnt_sbase == 0); 6610 ASSERT(svd->softlockcnt_send == 0); 6611 crhold(svd->cred); 6612 6613 if (svd->vpage != NULL) { 6614 size_t bytes = vpgtob(seg_pages(seg)); 6615 size_t nbytes = vpgtob(seg_pages(nseg)); 6616 struct vpage *ovpage = svd->vpage; 6617 6618 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6619 bcopy(ovpage, svd->vpage, bytes); 6620 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6621 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6622 kmem_free(ovpage, bytes + nbytes); 6623 } 6624 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6625 struct anon_map *oamp = svd->amp, *namp; 6626 struct anon_hdr *nahp; 6627 6628 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6629 ASSERT(oamp->refcnt == 1); 6630 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6631 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6632 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6633 6634 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6635 namp->a_szc = nseg->s_szc; 6636 (void) anon_copy_ptr(oamp->ahp, 6637 svd->anon_index + btop(seg->s_size), 6638 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6639 anon_release(oamp->ahp, btop(oamp->size)); 6640 oamp->ahp = nahp; 6641 oamp->size = seg->s_size; 6642 svd->anon_index = 0; 6643 nsvd->amp = namp; 6644 nsvd->anon_index = 0; 6645 ANON_LOCK_EXIT(&oamp->a_rwlock); 6646 } else if (svd->amp != NULL) { 6647 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6648 ASSERT(svd->amp == nsvd->amp); 6649 ASSERT(seg->s_szc <= svd->amp->a_szc); 6650 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6651 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6652 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6653 svd->amp->refcnt++; 6654 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6655 } 6656 6657 /* 6658 * Split the amount of swap reserved. 6659 */ 6660 if (svd->swresv) { 6661 /* 6662 * For MAP_NORESERVE, only allocate swap reserve for pages 6663 * being used. Other segments get enough to cover whole 6664 * segment. 6665 */ 6666 if (svd->flags & MAP_NORESERVE) { 6667 size_t oswresv; 6668 6669 ASSERT(svd->amp); 6670 oswresv = svd->swresv; 6671 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6672 svd->anon_index, btop(seg->s_size))); 6673 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6674 nsvd->anon_index, btop(nseg->s_size))); 6675 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6676 } else { 6677 if (svd->pageswap) { 6678 svd->swresv = segvn_count_swap_by_vpages(seg); 6679 ASSERT(nsvd->swresv >= svd->swresv); 6680 nsvd->swresv -= svd->swresv; 6681 } else { 6682 ASSERT(svd->swresv == seg->s_size + 6683 nseg->s_size); 6684 svd->swresv = seg->s_size; 6685 nsvd->swresv = nseg->s_size; 6686 } 6687 } 6688 } 6689 6690 return (nseg); 6691 } 6692 6693 /* 6694 * called on memory operations (unmap, setprot, setpagesize) for a subset 6695 * of a large page segment to either demote the memory range (SDR_RANGE) 6696 * or the ends (SDR_END) by addr/len. 6697 * 6698 * returns 0 on success. returns errno, including ENOMEM, on failure. 
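 *
 * Worked example (illustrative, assuming a 4M large page size):
 * unmapping [base + 1M, base + 3M) from a large-page segment is an
 * SDR_END operation - only the single 4M page overlapping the
 * misaligned ends is split off and demoted to the base page size,
 * while untouched large pages keep their size.  SDR_RANGE instead
 * demotes every large page overlapping [addr, addr + len).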
6699 */ 6700 static int 6701 segvn_demote_range( 6702 struct seg *seg, 6703 caddr_t addr, 6704 size_t len, 6705 int flag, 6706 uint_t szcvec) 6707 { 6708 caddr_t eaddr = addr + len; 6709 caddr_t lpgaddr, lpgeaddr; 6710 struct seg *nseg; 6711 struct seg *badseg1 = NULL; 6712 struct seg *badseg2 = NULL; 6713 size_t pgsz; 6714 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6715 int err; 6716 uint_t szc = seg->s_szc; 6717 uint_t tszcvec; 6718 6719 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6720 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6721 ASSERT(szc != 0); 6722 pgsz = page_get_pagesize(szc); 6723 ASSERT(seg->s_base != addr || seg->s_size != len); 6724 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6725 ASSERT(svd->softlockcnt == 0); 6726 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6727 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6728 6729 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6730 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6731 if (flag == SDR_RANGE) { 6732 /* demote entire range */ 6733 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6734 (void) segvn_split_seg(nseg, lpgeaddr); 6735 ASSERT(badseg1->s_base == lpgaddr); 6736 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6737 } else if (addr != lpgaddr) { 6738 ASSERT(flag == SDR_END); 6739 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6740 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6741 eaddr < lpgaddr + 2 * pgsz) { 6742 (void) segvn_split_seg(nseg, lpgeaddr); 6743 ASSERT(badseg1->s_base == lpgaddr); 6744 ASSERT(badseg1->s_size == 2 * pgsz); 6745 } else { 6746 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6747 ASSERT(badseg1->s_base == lpgaddr); 6748 ASSERT(badseg1->s_size == pgsz); 6749 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6750 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6751 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6752 badseg2 = nseg; 6753 (void) segvn_split_seg(nseg, lpgeaddr); 6754 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6755 ASSERT(badseg2->s_size == pgsz); 6756 } 6757 } 6758 } else { 6759 ASSERT(flag == SDR_END); 6760 ASSERT(eaddr < lpgeaddr); 6761 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6762 (void) segvn_split_seg(nseg, lpgeaddr); 6763 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6764 ASSERT(badseg1->s_size == pgsz); 6765 } 6766 6767 ASSERT(badseg1 != NULL); 6768 ASSERT(badseg1->s_szc == szc); 6769 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6770 badseg1->s_size == 2 * pgsz); 6771 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6772 ASSERT(badseg1->s_size == pgsz || 6773 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6774 if (err = segvn_clrszc(badseg1)) { 6775 return (err); 6776 } 6777 ASSERT(badseg1->s_szc == 0); 6778 6779 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6780 uint_t tszc = highbit(tszcvec) - 1; 6781 caddr_t ta = MAX(addr, badseg1->s_base); 6782 caddr_t te; 6783 size_t tpgsz = page_get_pagesize(tszc); 6784 6785 ASSERT(svd->type == MAP_SHARED); 6786 ASSERT(flag == SDR_END); 6787 ASSERT(tszc < szc && tszc > 0); 6788 6789 if (eaddr > badseg1->s_base + badseg1->s_size) { 6790 te = badseg1->s_base + badseg1->s_size; 6791 } else { 6792 te = eaddr; 6793 } 6794 6795 ASSERT(ta <= te); 6796 badseg1->s_szc = tszc; 6797 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6798 if (badseg2 != NULL) { 6799 err = segvn_demote_range(badseg1, ta, te - ta, 6800 SDR_END, tszcvec); 6801 if (err != 0) { 6802 return (err); 6803 } 6804 } else { 6805 
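/*
 * Illustrative example of the szcvec handling here: with
 * szc == 3 and szcvec == 0x0b, tszcvec = P2PHASE(0x0b, 1 << 3)
 * == 0x3, so tszc == 1; badseg1 is re-tagged with that smaller
 * page size and only the ends that are still misaligned for it
 * are demoted further by the recursive call below.
 */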
return (segvn_demote_range(badseg1, ta, 6806 te - ta, SDR_END, tszcvec)); 6807 } 6808 } 6809 } 6810 6811 if (badseg2 == NULL) 6812 return (0); 6813 ASSERT(badseg2->s_szc == szc); 6814 ASSERT(badseg2->s_size == pgsz); 6815 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6816 if (err = segvn_clrszc(badseg2)) { 6817 return (err); 6818 } 6819 ASSERT(badseg2->s_szc == 0); 6820 6821 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6822 uint_t tszc = highbit(tszcvec) - 1; 6823 size_t tpgsz = page_get_pagesize(tszc); 6824 6825 ASSERT(svd->type == MAP_SHARED); 6826 ASSERT(flag == SDR_END); 6827 ASSERT(tszc < szc && tszc > 0); 6828 ASSERT(badseg2->s_base > addr); 6829 ASSERT(eaddr > badseg2->s_base); 6830 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6831 6832 badseg2->s_szc = tszc; 6833 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6834 return (segvn_demote_range(badseg2, badseg2->s_base, 6835 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6836 } 6837 } 6838 6839 return (0); 6840 } 6841 6842 static int 6843 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6844 { 6845 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6846 struct vpage *vp, *evp; 6847 6848 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6849 6850 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6851 /* 6852 * If segment protection can be used, simply check against them. 6853 */ 6854 if (svd->pageprot == 0) { 6855 int err; 6856 6857 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6858 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6859 return (err); 6860 } 6861 6862 /* 6863 * Have to check down to the vpage level. 6864 */ 6865 evp = &svd->vpage[seg_page(seg, addr + len)]; 6866 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6867 if ((VPP_PROT(vp) & prot) != prot) { 6868 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6869 return (EACCES); 6870 } 6871 } 6872 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6873 return (0); 6874 } 6875 6876 static int 6877 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6878 { 6879 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6880 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6881 6882 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6883 6884 if (pgno != 0) { 6885 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6886 if (svd->pageprot == 0) { 6887 do { 6888 protv[--pgno] = svd->prot; 6889 } while (pgno != 0); 6890 } else { 6891 size_t pgoff = seg_page(seg, addr); 6892 6893 do { 6894 pgno--; 6895 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6896 } while (pgno != 0); 6897 } 6898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6899 } 6900 return (0); 6901 } 6902 6903 static u_offset_t 6904 segvn_getoffset(struct seg *seg, caddr_t addr) 6905 { 6906 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6907 6908 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6909 6910 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6911 } 6912 6913 /*ARGSUSED*/ 6914 static int 6915 segvn_gettype(struct seg *seg, caddr_t addr) 6916 { 6917 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6918 6919 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6920 6921 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6922 MAP_INITDATA))); 6923 } 6924 6925 /*ARGSUSED*/ 6926 static int 6927 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6928 { 6929 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6930 6931 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6932 6933 *vpp = svd->vp; 6934 return (0); 6935 } 6936 6937 /* 6938 * Check to see if it makes sense to do kluster/read ahead to 6939 * addr + delta relative to the mapping at addr. We assume here 6940 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6941 * 6942 * For segvn, we currently "approve" of the action if we are 6943 * still in the segment and it maps from the same vp/off, 6944 * or if the advice stored in segvn_data or vpages allows it. 6945 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6946 */ 6947 static int 6948 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6949 { 6950 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6951 struct anon *oap, *ap; 6952 ssize_t pd; 6953 size_t page; 6954 struct vnode *vp1, *vp2; 6955 u_offset_t off1, off2; 6956 struct anon_map *amp; 6957 6958 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6959 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6960 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6961 6962 if (addr + delta < seg->s_base || 6963 addr + delta >= (seg->s_base + seg->s_size)) 6964 return (-1); /* exceeded segment bounds */ 6965 6966 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6967 page = seg_page(seg, addr); 6968 6969 /* 6970 * Check to see if either of the pages addr or addr + delta 6971 * have advice set that prevents klustering (if MADV_RANDOM advice 6972 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6973 * is negative). 6974 */ 6975 if (svd->advice == MADV_RANDOM || 6976 svd->advice == MADV_SEQUENTIAL && delta < 0) 6977 return (-1); 6978 else if (svd->pageadvice && svd->vpage) { 6979 struct vpage *bvpp, *evpp; 6980 6981 bvpp = &svd->vpage[page]; 6982 evpp = &svd->vpage[page + pd]; 6983 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6984 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6985 return (-1); 6986 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6987 VPP_ADVICE(evpp) == MADV_RANDOM) 6988 return (-1); 6989 } 6990 6991 if (svd->type == MAP_SHARED) 6992 return (0); /* shared mapping - all ok */ 6993 6994 if ((amp = svd->amp) == NULL) 6995 return (0); /* off original vnode */ 6996 6997 page += svd->anon_index; 6998 6999 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7000 7001 oap = anon_get_ptr(amp->ahp, page); 7002 ap = anon_get_ptr(amp->ahp, page + pd); 7003 7004 ANON_LOCK_EXIT(&->a_rwlock); 7005 7006 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 7007 return (-1); /* one with and one without an anon */ 7008 } 7009 7010 if (oap == NULL) { /* implies that ap == NULL */ 7011 return (0); /* off original vnode */ 7012 } 7013 7014 /* 7015 * Now we know we have two anon pointers - check to 7016 * see if they happen to be properly allocated. 7017 */ 7018 7019 /* 7020 * XXX We cheat here and don't lock the anon slots. We can't because 7021 * we may have been called from the anon layer which might already 7022 * have locked them. We are holding a refcnt on the slots so they 7023 * can't disappear. The worst that will happen is we'll get the wrong 7024 * names (vp, off) for the slots and make a poor klustering decision. 7025 */ 7026 swap_xlate(ap, &vp1, &off1); 7027 swap_xlate(oap, &vp2, &off2); 7028 7029 7030 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 7031 return (-1); 7032 return (0); 7033 } 7034 7035 /* 7036 * Swap the pages of seg out to secondary storage, returning the 7037 * number of bytes of storage freed. 
7038 * 7039 * The basic idea is first to unload all translations and then to call 7040 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 7041 * swap device. Pages to which other segments have mappings will remain 7042 * mapped and won't be swapped. Our caller (as_swapout) has already 7043 * performed the unloading step. 7044 * 7045 * The value returned is intended to correlate well with the process's 7046 * memory requirements. However, there are some caveats: 7047 * 1) When given a shared segment as argument, this routine will 7048 * only succeed in swapping out pages for the last sharer of the 7049 * segment. (Previous callers will only have decremented mapping 7050 * reference counts.) 7051 * 2) We assume that the hat layer maintains a large enough translation 7052 * cache to capture process reference patterns. 7053 */ 7054 static size_t 7055 segvn_swapout(struct seg *seg) 7056 { 7057 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7058 struct anon_map *amp; 7059 pgcnt_t pgcnt = 0; 7060 pgcnt_t npages; 7061 pgcnt_t page; 7062 ulong_t anon_index; 7063 7064 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7065 7066 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7067 /* 7068 * Find pages unmapped by our caller and force them 7069 * out to the virtual swap device. 7070 */ 7071 if ((amp = svd->amp) != NULL) 7072 anon_index = svd->anon_index; 7073 npages = seg->s_size >> PAGESHIFT; 7074 for (page = 0; page < npages; page++) { 7075 page_t *pp; 7076 struct anon *ap; 7077 struct vnode *vp; 7078 u_offset_t off; 7079 anon_sync_obj_t cookie; 7080 7081 /* 7082 * Obtain <vp, off> pair for the page, then look it up. 7083 * 7084 * Note that this code is willing to consider regular 7085 * pages as well as anon pages. Is this appropriate here? 7086 */ 7087 ap = NULL; 7088 if (amp != NULL) { 7089 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7090 if (anon_array_try_enter(amp, anon_index + page, 7091 &cookie)) { 7092 ANON_LOCK_EXIT(&->a_rwlock); 7093 continue; 7094 } 7095 ap = anon_get_ptr(amp->ahp, anon_index + page); 7096 if (ap != NULL) { 7097 swap_xlate(ap, &vp, &off); 7098 } else { 7099 vp = svd->vp; 7100 off = svd->offset + ptob(page); 7101 } 7102 anon_array_exit(&cookie); 7103 ANON_LOCK_EXIT(&->a_rwlock); 7104 } else { 7105 vp = svd->vp; 7106 off = svd->offset + ptob(page); 7107 } 7108 if (vp == NULL) { /* untouched zfod page */ 7109 ASSERT(ap == NULL); 7110 continue; 7111 } 7112 7113 pp = page_lookup_nowait(vp, off, SE_SHARED); 7114 if (pp == NULL) 7115 continue; 7116 7117 7118 /* 7119 * Examine the page to see whether it can be tossed out, 7120 * keeping track of how many we've found. 7121 */ 7122 if (!page_tryupgrade(pp)) { 7123 /* 7124 * If the page has an i/o lock and no mappings, 7125 * it's very likely that the page is being 7126 * written out as a result of klustering. 7127 * Assume this is so and take credit for it here. 7128 */ 7129 if (!page_io_trylock(pp)) { 7130 if (!hat_page_is_mapped(pp)) 7131 pgcnt++; 7132 } else { 7133 page_io_unlock(pp); 7134 } 7135 page_unlock(pp); 7136 continue; 7137 } 7138 ASSERT(!page_iolock_assert(pp)); 7139 7140 7141 /* 7142 * Skip if page is locked or has mappings. 7143 * We don't need the page_struct_lock to look at lckcnt 7144 * and cowcnt because the page is exclusive locked. 7145 */ 7146 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7147 hat_page_is_mapped(pp)) { 7148 page_unlock(pp); 7149 continue; 7150 } 7151 7152 /* 7153 * dispose skips large pages so try to demote first. 
7154 */ 7155 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7156 page_unlock(pp); 7157 /* 7158 * XXX should skip the remaining page_t's of this 7159 * large page. 7160 */ 7161 continue; 7162 } 7163 7164 ASSERT(pp->p_szc == 0); 7165 7166 /* 7167 * No longer mapped -- we can toss it out. How 7168 * we do so depends on whether or not it's dirty. 7169 */ 7170 if (hat_ismod(pp) && pp->p_vnode) { 7171 /* 7172 * We must clean the page before it can be 7173 * freed. Setting B_FREE will cause pvn_done 7174 * to free the page when the i/o completes. 7175 * XXX: This also causes it to be accounted 7176 * as a pageout instead of a swap: need 7177 * B_SWAPOUT bit to use instead of B_FREE. 7178 * 7179 * Hold the vnode before releasing the page lock 7180 * to prevent it from being freed and re-used by 7181 * some other thread. 7182 */ 7183 VN_HOLD(vp); 7184 page_unlock(pp); 7185 7186 /* 7187 * Queue all i/o requests for the pageout thread 7188 * to avoid saturating the pageout devices. 7189 */ 7190 if (!queue_io_request(vp, off)) 7191 VN_RELE(vp); 7192 } else { 7193 /* 7194 * The page was clean, free it. 7195 * 7196 * XXX: Can we ever encounter modified pages 7197 * with no associated vnode here? 7198 */ 7199 ASSERT(pp->p_vnode != NULL); 7200 /*LINTED: constant in conditional context*/ 7201 VN_DISPOSE(pp, B_FREE, 0, kcred); 7202 } 7203 7204 /* 7205 * Credit now even if i/o is in progress. 7206 */ 7207 pgcnt++; 7208 } 7209 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7210 7211 /* 7212 * Wakeup pageout to initiate i/o on all queued requests. 7213 */ 7214 cv_signal_pageout(); 7215 return (ptob(pgcnt)); 7216 } 7217 7218 /* 7219 * Synchronize primary storage cache with real object in virtual memory. 7220 * 7221 * XXX - Anonymous pages should not be sync'ed out at all. 7222 */ 7223 static int 7224 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7225 { 7226 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7227 struct vpage *vpp; 7228 page_t *pp; 7229 u_offset_t offset; 7230 struct vnode *vp; 7231 u_offset_t off; 7232 caddr_t eaddr; 7233 int bflags; 7234 int err = 0; 7235 int segtype; 7236 int pageprot; 7237 int prot; 7238 ulong_t anon_index; 7239 struct anon_map *amp; 7240 struct anon *ap; 7241 anon_sync_obj_t cookie; 7242 7243 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7244 7245 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7246 7247 if (svd->softlockcnt > 0) { 7248 /* 7249 * If this is shared segment non 0 softlockcnt 7250 * means locked pages are still in use. 7251 */ 7252 if (svd->type == MAP_SHARED) { 7253 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7254 return (EAGAIN); 7255 } 7256 7257 /* 7258 * flush all pages from seg cache 7259 * otherwise we may deadlock in swap_putpage 7260 * for B_INVAL page (4175402). 7261 * 7262 * Even if we grab segvn WRITER's lock 7263 * here, there might be another thread which could've 7264 * successfully performed lookup/insert just before 7265 * we acquired the lock here. So, grabbing either 7266 * lock here is of not much use. Until we devise 7267 * a strategy at upper layers to solve the 7268 * synchronization issues completely, we expect 7269 * applications to handle this appropriately. 7270 */ 7271 segvn_purge(seg); 7272 if (svd->softlockcnt > 0) { 7273 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7274 return (EAGAIN); 7275 } 7276 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7277 svd->amp->a_softlockcnt > 0) { 7278 /* 7279 * Try to purge this amp's entries from pcache. 
It will 7280 * succeed only if other segments that share the amp have no 7281 * outstanding softlock's. 7282 */ 7283 segvn_purge(seg); 7284 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7285 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7286 return (EAGAIN); 7287 } 7288 } 7289 7290 vpp = svd->vpage; 7291 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7292 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7293 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7294 7295 if (attr) { 7296 pageprot = attr & ~(SHARED|PRIVATE); 7297 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7298 7299 /* 7300 * We are done if the segment types don't match 7301 * or if we have segment level protections and 7302 * they don't match. 7303 */ 7304 if (svd->type != segtype) { 7305 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7306 return (0); 7307 } 7308 if (vpp == NULL) { 7309 if (svd->prot != pageprot) { 7310 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7311 return (0); 7312 } 7313 prot = svd->prot; 7314 } else 7315 vpp = &svd->vpage[seg_page(seg, addr)]; 7316 7317 } else if (svd->vp && svd->amp == NULL && 7318 (flags & MS_INVALIDATE) == 0) { 7319 7320 /* 7321 * No attributes, no anonymous pages and MS_INVALIDATE flag 7322 * is not on, just use one big request. 7323 */ 7324 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7325 bflags, svd->cred, NULL); 7326 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7327 return (err); 7328 } 7329 7330 if ((amp = svd->amp) != NULL) 7331 anon_index = svd->anon_index + seg_page(seg, addr); 7332 7333 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7334 ap = NULL; 7335 if (amp != NULL) { 7336 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7337 anon_array_enter(amp, anon_index, &cookie); 7338 ap = anon_get_ptr(amp->ahp, anon_index++); 7339 if (ap != NULL) { 7340 swap_xlate(ap, &vp, &off); 7341 } else { 7342 vp = svd->vp; 7343 off = offset; 7344 } 7345 anon_array_exit(&cookie); 7346 ANON_LOCK_EXIT(&->a_rwlock); 7347 } else { 7348 vp = svd->vp; 7349 off = offset; 7350 } 7351 offset += PAGESIZE; 7352 7353 if (vp == NULL) /* untouched zfod page */ 7354 continue; 7355 7356 if (attr) { 7357 if (vpp) { 7358 prot = VPP_PROT(vpp); 7359 vpp++; 7360 } 7361 if (prot != pageprot) { 7362 continue; 7363 } 7364 } 7365 7366 /* 7367 * See if any of these pages are locked -- if so, then we 7368 * will have to truncate an invalidate request at the first 7369 * locked one. We don't need the page_struct_lock to test 7370 * as this is only advisory; even if we acquire it someone 7371 * might race in and lock the page after we unlock and before 7372 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7373 */ 7374 if (flags & MS_INVALIDATE) { 7375 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7376 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7377 page_unlock(pp); 7378 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7379 return (EBUSY); 7380 } 7381 if (ap != NULL && pp->p_szc != 0 && 7382 page_tryupgrade(pp)) { 7383 if (pp->p_lckcnt == 0 && 7384 pp->p_cowcnt == 0) { 7385 /* 7386 * swapfs VN_DISPOSE() won't 7387 * invalidate large pages. 7388 * Attempt to demote. 7389 * XXX can't help it if it 7390 * fails. But for swapfs 7391 * pages it is no big deal. 7392 */ 7393 (void) page_try_demote_pages( 7394 pp); 7395 } 7396 } 7397 page_unlock(pp); 7398 } 7399 } else if (svd->type == MAP_SHARED && amp != NULL) { 7400 /* 7401 * Avoid writing out to disk ISM's large pages 7402 * because segspt_free_pages() relies on NULL an_pvp 7403 * of anon slots of such pages. 
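 *
 * (Aside, for illustration: the common way into this routine as a
 * whole is msync(3C), e.g.
 *
 *	if (msync(addr, len, MS_SYNC | MS_INVALIDATE) != 0)
 *		perror("msync");
 *
 * MS_ASYNC becomes B_ASYNC and MS_INVALIDATE becomes B_INVAL in the
 * bflags computed earlier, and an MS_INVALIDATE request fails with
 * EBUSY if a page in the range is locked.)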
7404 */ 7405 7406 ASSERT(svd->vp == NULL); 7407 /* 7408 * swapfs uses page_lookup_nowait if not freeing or 7409 * invalidating and skips a page if 7410 * page_lookup_nowait returns NULL. 7411 */ 7412 pp = page_lookup_nowait(vp, off, SE_SHARED); 7413 if (pp == NULL) { 7414 continue; 7415 } 7416 if (pp->p_szc != 0) { 7417 page_unlock(pp); 7418 continue; 7419 } 7420 7421 /* 7422 * Note ISM pages are created large so (vp, off)'s 7423 * page cannot suddenly become large after we unlock 7424 * pp. 7425 */ 7426 page_unlock(pp); 7427 } 7428 /* 7429 * XXX - Should ultimately try to kluster 7430 * calls to VOP_PUTPAGE() for performance. 7431 */ 7432 VN_HOLD(vp); 7433 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7434 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7435 svd->cred, NULL); 7436 7437 VN_RELE(vp); 7438 if (err) 7439 break; 7440 } 7441 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7442 return (err); 7443 } 7444 7445 /* 7446 * Determine if we have data corresponding to pages in the 7447 * primary storage virtual memory cache (i.e., "in core"). 7448 */ 7449 static size_t 7450 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7451 { 7452 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7453 struct vnode *vp, *avp; 7454 u_offset_t offset, aoffset; 7455 size_t p, ep; 7456 int ret; 7457 struct vpage *vpp; 7458 page_t *pp; 7459 uint_t start; 7460 struct anon_map *amp; /* XXX - for locknest */ 7461 struct anon *ap; 7462 uint_t attr; 7463 anon_sync_obj_t cookie; 7464 7465 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7466 7467 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7468 if (svd->amp == NULL && svd->vp == NULL) { 7469 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7470 bzero(vec, btopr(len)); 7471 return (len); /* no anonymous pages created yet */ 7472 } 7473 7474 p = seg_page(seg, addr); 7475 ep = seg_page(seg, addr + len); 7476 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7477 7478 amp = svd->amp; 7479 for (; p < ep; p++, addr += PAGESIZE) { 7480 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7481 ret = start; 7482 ap = NULL; 7483 avp = NULL; 7484 /* Grab the vnode/offset for the anon slot */ 7485 if (amp != NULL) { 7486 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7487 anon_array_enter(amp, svd->anon_index + p, &cookie); 7488 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7489 if (ap != NULL) { 7490 swap_xlate(ap, &avp, &aoffset); 7491 } 7492 anon_array_exit(&cookie); 7493 ANON_LOCK_EXIT(&->a_rwlock); 7494 } 7495 if ((avp != NULL) && page_exists(avp, aoffset)) { 7496 /* A page exists for the anon slot */ 7497 ret |= SEG_PAGE_INCORE; 7498 7499 /* 7500 * If page is mapped and writable 7501 */ 7502 attr = (uint_t)0; 7503 if ((hat_getattr(seg->s_as->a_hat, addr, 7504 &attr) != -1) && (attr & PROT_WRITE)) { 7505 ret |= SEG_PAGE_ANON; 7506 } 7507 /* 7508 * Don't get page_struct lock for lckcnt and cowcnt, 7509 * since this is purely advisory. 7510 */ 7511 if ((pp = page_lookup_nowait(avp, aoffset, 7512 SE_SHARED)) != NULL) { 7513 if (pp->p_lckcnt) 7514 ret |= SEG_PAGE_SOFTLOCK; 7515 if (pp->p_cowcnt) 7516 ret |= SEG_PAGE_HASCOW; 7517 page_unlock(pp); 7518 } 7519 } 7520 7521 /* Gather vnode statistics */ 7522 vp = svd->vp; 7523 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7524 7525 if (vp != NULL) { 7526 /* 7527 * Try to obtain a "shared" lock on the page 7528 * without blocking. If this fails, determine 7529 * if the page is in memory. 
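 *
 * (Aside, for illustration: this routine is what a userland
 * mincore(2) call ultimately consults, e.g.
 *
 *	size_t pgsz = sysconf(_SC_PAGESIZE);
 *	char *vec = malloc((len + pgsz - 1) / pgsz);
 *
 *	if (mincore(addr, len, vec) == 0 && (vec[0] & 1))
 *		... first page is resident ...
 *
 * the SEG_PAGE_* flags gathered per page here are reduced to a
 * simple residency indication for that interface.)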
7530 */ 7531 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7532 if ((pp == NULL) && (page_exists(vp, offset))) { 7533 /* Page is incore, and is named */ 7534 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7535 } 7536 /* 7537 * Don't get page_struct lock for lckcnt and cowcnt, 7538 * since this is purely advisory. 7539 */ 7540 if (pp != NULL) { 7541 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7542 if (pp->p_lckcnt) 7543 ret |= SEG_PAGE_SOFTLOCK; 7544 if (pp->p_cowcnt) 7545 ret |= SEG_PAGE_HASCOW; 7546 page_unlock(pp); 7547 } 7548 } 7549 7550 /* Gather virtual page information */ 7551 if (vpp) { 7552 if (VPP_ISPPLOCK(vpp)) 7553 ret |= SEG_PAGE_LOCKED; 7554 vpp++; 7555 } 7556 7557 *vec++ = (char)ret; 7558 } 7559 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7560 return (len); 7561 } 7562 7563 /* 7564 * Statement for p_cowcnts/p_lckcnts. 7565 * 7566 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7567 * irrespective of the following factors or anything else: 7568 * 7569 * (1) anon slots are populated or not 7570 * (2) cow is broken or not 7571 * (3) refcnt on ap is 1 or greater than 1 7572 * 7573 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7574 * and munlock. 7575 * 7576 * 7577 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7578 * 7579 * if vpage has PROT_WRITE 7580 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7581 * else 7582 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7583 * 7584 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7585 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7586 * 7587 * We may also break COW if softlocking on read access in the physio case. 7588 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7589 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7590 * vpage doesn't have PROT_WRITE. 7591 * 7592 * 7593 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7594 * 7595 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7596 * increment p_lckcnt by calling page_subclaim() which takes care of 7597 * availrmem accounting and p_lckcnt overflow. 7598 * 7599 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7600 * increment p_cowcnt by calling page_addclaim() which takes care of 7601 * availrmem availability and p_cowcnt overflow. 7602 */ 7603 7604 /* 7605 * Lock down (or unlock) pages mapped by this segment. 7606 * 7607 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7608 * At fault time they will be relocated into larger pages. 
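 *
 * A minimal userland sketch (illustrative): mlock(3C)/munlock(3C),
 * which libc implements with memcntl(2) MC_LOCK/MC_UNLOCK, end up
 * here:
 *
 *	if (mlock(addr, len) != 0)		op == MC_LOCK
 *		perror("mlock");
 *	...
 *	(void) munlock(addr, len);		op == MC_UNLOCK
 *
 * MC_LOCK page_pp_lock()s each page and marks its vpage with
 * VPP_SETPPLOCK(); the locked bytes are charged to the process (or
 * sysV shm project) locked-memory rctl via rctl_incr_locked_mem().
 * MC_UNLOCK reverses both.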
7609 */ 7610 static int 7611 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7612 int attr, int op, ulong_t *lockmap, size_t pos) 7613 { 7614 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7615 struct vpage *vpp; 7616 struct vpage *evp; 7617 page_t *pp; 7618 u_offset_t offset; 7619 u_offset_t off; 7620 int segtype; 7621 int pageprot; 7622 int claim; 7623 struct vnode *vp; 7624 ulong_t anon_index; 7625 struct anon_map *amp; 7626 struct anon *ap; 7627 struct vattr va; 7628 anon_sync_obj_t cookie; 7629 struct kshmid *sp = NULL; 7630 struct proc *p = curproc; 7631 kproject_t *proj = NULL; 7632 int chargeproc = 1; 7633 size_t locked_bytes = 0; 7634 size_t unlocked_bytes = 0; 7635 int err = 0; 7636 7637 /* 7638 * Hold write lock on address space because may split or concatenate 7639 * segments 7640 */ 7641 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7642 7643 /* 7644 * If this is a shm, use shm's project and zone, else use 7645 * project and zone of calling process 7646 */ 7647 7648 /* Determine if this segment backs a sysV shm */ 7649 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7650 ASSERT(svd->type == MAP_SHARED); 7651 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7652 sp = svd->amp->a_sp; 7653 proj = sp->shm_perm.ipc_proj; 7654 chargeproc = 0; 7655 } 7656 7657 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7658 if (attr) { 7659 pageprot = attr & ~(SHARED|PRIVATE); 7660 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7661 7662 /* 7663 * We are done if the segment types don't match 7664 * or if we have segment level protections and 7665 * they don't match. 7666 */ 7667 if (svd->type != segtype) { 7668 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7669 return (0); 7670 } 7671 if (svd->pageprot == 0 && svd->prot != pageprot) { 7672 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7673 return (0); 7674 } 7675 } 7676 7677 if (op == MC_LOCK) { 7678 if (svd->tr_state == SEGVN_TR_INIT) { 7679 svd->tr_state = SEGVN_TR_OFF; 7680 } else if (svd->tr_state == SEGVN_TR_ON) { 7681 ASSERT(svd->amp != NULL); 7682 segvn_textunrepl(seg, 0); 7683 ASSERT(svd->amp == NULL && 7684 svd->tr_state == SEGVN_TR_OFF); 7685 } 7686 } 7687 7688 /* 7689 * If we're locking, then we must create a vpage structure if 7690 * none exists. If we're unlocking, then check to see if there 7691 * is a vpage -- if not, then we could not have locked anything. 7692 */ 7693 7694 if ((vpp = svd->vpage) == NULL) { 7695 if (op == MC_LOCK) { 7696 segvn_vpage(seg); 7697 if (svd->vpage == NULL) { 7698 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7699 return (ENOMEM); 7700 } 7701 } else { 7702 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7703 return (0); 7704 } 7705 } 7706 7707 /* 7708 * The anonymous data vector (i.e., previously 7709 * unreferenced mapping to swap space) can be allocated 7710 * by lazily testing for its existence. 
7711 */ 7712 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7713 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7714 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7715 svd->amp->a_szc = seg->s_szc; 7716 } 7717 7718 if ((amp = svd->amp) != NULL) { 7719 anon_index = svd->anon_index + seg_page(seg, addr); 7720 } 7721 7722 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7723 evp = &svd->vpage[seg_page(seg, addr + len)]; 7724 7725 if (sp != NULL) 7726 mutex_enter(&sp->shm_mlock); 7727 7728 /* determine number of unlocked bytes in range for lock operation */ 7729 if (op == MC_LOCK) { 7730 7731 if (sp == NULL) { 7732 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7733 vpp++) { 7734 if (!VPP_ISPPLOCK(vpp)) 7735 unlocked_bytes += PAGESIZE; 7736 } 7737 } else { 7738 ulong_t i_idx, i_edx; 7739 anon_sync_obj_t i_cookie; 7740 struct anon *i_ap; 7741 struct vnode *i_vp; 7742 u_offset_t i_off; 7743 7744 /* Only count sysV pages once for locked memory */ 7745 i_edx = svd->anon_index + seg_page(seg, addr + len); 7746 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7747 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7748 anon_array_enter(amp, i_idx, &i_cookie); 7749 i_ap = anon_get_ptr(amp->ahp, i_idx); 7750 if (i_ap == NULL) { 7751 unlocked_bytes += PAGESIZE; 7752 anon_array_exit(&i_cookie); 7753 continue; 7754 } 7755 swap_xlate(i_ap, &i_vp, &i_off); 7756 anon_array_exit(&i_cookie); 7757 pp = page_lookup(i_vp, i_off, SE_SHARED); 7758 if (pp == NULL) { 7759 unlocked_bytes += PAGESIZE; 7760 continue; 7761 } else if (pp->p_lckcnt == 0) 7762 unlocked_bytes += PAGESIZE; 7763 page_unlock(pp); 7764 } 7765 ANON_LOCK_EXIT(&->a_rwlock); 7766 } 7767 7768 mutex_enter(&p->p_lock); 7769 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7770 chargeproc); 7771 mutex_exit(&p->p_lock); 7772 7773 if (err) { 7774 if (sp != NULL) 7775 mutex_exit(&sp->shm_mlock); 7776 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7777 return (err); 7778 } 7779 } 7780 /* 7781 * Loop over all pages in the range. Process if we're locking and 7782 * page has not already been locked in this mapping; or if we're 7783 * unlocking and the page has been locked. 7784 */ 7785 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7786 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7787 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7788 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7789 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7790 7791 if (amp != NULL) 7792 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7793 /* 7794 * If this isn't a MAP_NORESERVE segment and 7795 * we're locking, allocate anon slots if they 7796 * don't exist. The page is brought in later on. 7797 */ 7798 if (op == MC_LOCK && svd->vp == NULL && 7799 ((svd->flags & MAP_NORESERVE) == 0) && 7800 amp != NULL && 7801 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7802 == NULL)) { 7803 anon_array_enter(amp, anon_index, &cookie); 7804 7805 if ((ap = anon_get_ptr(amp->ahp, 7806 anon_index)) == NULL) { 7807 pp = anon_zero(seg, addr, &ap, 7808 svd->cred); 7809 if (pp == NULL) { 7810 anon_array_exit(&cookie); 7811 ANON_LOCK_EXIT(&->a_rwlock); 7812 err = ENOMEM; 7813 goto out; 7814 } 7815 ASSERT(anon_get_ptr(amp->ahp, 7816 anon_index) == NULL); 7817 (void) anon_set_ptr(amp->ahp, 7818 anon_index, ap, ANON_SLEEP); 7819 page_unlock(pp); 7820 } 7821 anon_array_exit(&cookie); 7822 } 7823 7824 /* 7825 * Get name for page, accounting for 7826 * existence of private copy. 
7827 */ 7828 ap = NULL; 7829 if (amp != NULL) { 7830 anon_array_enter(amp, anon_index, &cookie); 7831 ap = anon_get_ptr(amp->ahp, anon_index); 7832 if (ap != NULL) { 7833 swap_xlate(ap, &vp, &off); 7834 } else { 7835 if (svd->vp == NULL && 7836 (svd->flags & MAP_NORESERVE)) { 7837 anon_array_exit(&cookie); 7838 ANON_LOCK_EXIT(&->a_rwlock); 7839 continue; 7840 } 7841 vp = svd->vp; 7842 off = offset; 7843 } 7844 if (op != MC_LOCK || ap == NULL) { 7845 anon_array_exit(&cookie); 7846 ANON_LOCK_EXIT(&->a_rwlock); 7847 } 7848 } else { 7849 vp = svd->vp; 7850 off = offset; 7851 } 7852 7853 /* 7854 * Get page frame. It's ok if the page is 7855 * not available when we're unlocking, as this 7856 * may simply mean that a page we locked got 7857 * truncated out of existence after we locked it. 7858 * 7859 * Invoke VOP_GETPAGE() to obtain the page struct 7860 * since we may need to read it from disk if its 7861 * been paged out. 7862 */ 7863 if (op != MC_LOCK) 7864 pp = page_lookup(vp, off, SE_SHARED); 7865 else { 7866 page_t *pl[1 + 1]; 7867 int error; 7868 7869 ASSERT(vp != NULL); 7870 7871 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7872 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7873 S_OTHER, svd->cred, NULL); 7874 7875 if (error && ap != NULL) { 7876 anon_array_exit(&cookie); 7877 ANON_LOCK_EXIT(&->a_rwlock); 7878 } 7879 7880 /* 7881 * If the error is EDEADLK then we must bounce 7882 * up and drop all vm subsystem locks and then 7883 * retry the operation later 7884 * This behavior is a temporary measure because 7885 * ufs/sds logging is badly designed and will 7886 * deadlock if we don't allow this bounce to 7887 * happen. The real solution is to re-design 7888 * the logging code to work properly. See bug 7889 * 4125102 for details of the problem. 7890 */ 7891 if (error == EDEADLK) { 7892 err = error; 7893 goto out; 7894 } 7895 /* 7896 * Quit if we fail to fault in the page. Treat 7897 * the failure as an error, unless the addr 7898 * is mapped beyond the end of a file. 7899 */ 7900 if (error && svd->vp) { 7901 va.va_mask = AT_SIZE; 7902 if (VOP_GETATTR(svd->vp, &va, 0, 7903 svd->cred, NULL) != 0) { 7904 err = EIO; 7905 goto out; 7906 } 7907 if (btopr(va.va_size) >= 7908 btopr(off + 1)) { 7909 err = EIO; 7910 goto out; 7911 } 7912 goto out; 7913 7914 } else if (error) { 7915 err = EIO; 7916 goto out; 7917 } 7918 pp = pl[0]; 7919 ASSERT(pp != NULL); 7920 } 7921 7922 /* 7923 * See Statement at the beginning of this routine. 7924 * 7925 * claim is always set if MAP_PRIVATE and PROT_WRITE 7926 * irrespective of following factors: 7927 * 7928 * (1) anon slots are populated or not 7929 * (2) cow is broken or not 7930 * (3) refcnt on ap is 1 or greater than 1 7931 * 7932 * See 4140683 for details 7933 */ 7934 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7935 (svd->type == MAP_PRIVATE)); 7936 7937 /* 7938 * Perform page-level operation appropriate to 7939 * operation. If locking, undo the SOFTLOCK 7940 * performed to bring the page into memory 7941 * after setting the lock. If unlocking, 7942 * and no page was found, account for the claim 7943 * separately. 
7944 */ 7945 if (op == MC_LOCK) { 7946 int ret = 1; /* Assume success */ 7947 7948 ASSERT(!VPP_ISPPLOCK(vpp)); 7949 7950 ret = page_pp_lock(pp, claim, 0); 7951 if (ap != NULL) { 7952 if (ap->an_pvp != NULL) { 7953 anon_swap_free(ap, pp); 7954 } 7955 anon_array_exit(&cookie); 7956 ANON_LOCK_EXIT(&amp->a_rwlock); 7957 } 7958 if (ret == 0) { 7959 /* locking page failed */ 7960 page_unlock(pp); 7961 err = EAGAIN; 7962 goto out; 7963 } 7964 VPP_SETPPLOCK(vpp); 7965 if (sp != NULL) { 7966 if (pp->p_lckcnt == 1) 7967 locked_bytes += PAGESIZE; 7968 } else 7969 locked_bytes += PAGESIZE; 7970 7971 if (lockmap != (ulong_t *)NULL) 7972 BT_SET(lockmap, pos); 7973 7974 page_unlock(pp); 7975 } else { 7976 ASSERT(VPP_ISPPLOCK(vpp)); 7977 if (pp != NULL) { 7978 /* sysV pages should be locked */ 7979 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7980 page_pp_unlock(pp, claim, 0); 7981 if (sp != NULL) { 7982 if (pp->p_lckcnt == 0) 7983 unlocked_bytes 7984 += PAGESIZE; 7985 } else 7986 unlocked_bytes += PAGESIZE; 7987 page_unlock(pp); 7988 } else { 7989 ASSERT(sp == NULL); 7990 unlocked_bytes += PAGESIZE; 7991 } 7992 VPP_CLRPPLOCK(vpp); 7993 } 7994 } 7995 } 7996 out: 7997 if (op == MC_LOCK) { 7998 /* Credit back bytes that did not get locked */ 7999 if ((unlocked_bytes - locked_bytes) > 0) { 8000 if (proj == NULL) 8001 mutex_enter(&p->p_lock); 8002 rctl_decr_locked_mem(p, proj, 8003 (unlocked_bytes - locked_bytes), chargeproc); 8004 if (proj == NULL) 8005 mutex_exit(&p->p_lock); 8006 } 8007 8008 } else { 8009 /* Account bytes that were unlocked */ 8010 if (unlocked_bytes > 0) { 8011 if (proj == NULL) 8012 mutex_enter(&p->p_lock); 8013 rctl_decr_locked_mem(p, proj, unlocked_bytes, 8014 chargeproc); 8015 if (proj == NULL) 8016 mutex_exit(&p->p_lock); 8017 } 8018 } 8019 if (sp != NULL) 8020 mutex_exit(&sp->shm_mlock); 8021 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8022 8023 return (err); 8024 } 8025 8026 /* 8027 * Set advice from user for specified pages 8028 * There are 9 types of advice: 8029 * MADV_NORMAL - Normal (default) behavior (whatever that is) 8030 * MADV_RANDOM - Random page references 8031 * do not allow readahead or 'klustering' 8032 * MADV_SEQUENTIAL - Sequential page references 8033 * Pages previous to the one currently being 8034 * accessed (determined by fault) are 'not needed' 8035 * and are freed immediately 8036 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 8037 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 8038 * MADV_FREE - Contents can be discarded 8039 * MADV_ACCESS_DEFAULT- Default access 8040 * MADV_ACCESS_LWP - Next LWP will access heavily 8041 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 8042 */ 8043 static int 8044 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8045 { 8046 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8047 size_t page; 8048 int err = 0; 8049 int already_set; 8050 struct anon_map *amp; 8051 ulong_t anon_index; 8052 struct seg *next; 8053 lgrp_mem_policy_t policy; 8054 struct seg *prev; 8055 struct vnode *vp; 8056 8057 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8058 8059 /* 8060 * In case of MADV_FREE, we won't be modifying any segment private 8061 * data structures; so, we only need to grab READER's lock 8062 */ 8063 if (behav != MADV_FREE) { 8064 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8065 if (svd->tr_state != SEGVN_TR_OFF) { 8066 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8067 return (0); 8068 } 8069 } else { 8070 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock,
RW_READER); 8071 } 8072 8073 /* 8074 * Large pages are assumed to be only turned on when accesses to the 8075 * segment's address range have spatial and temporal locality. That 8076 * justifies ignoring MADV_SEQUENTIAL for large page segments. 8077 * Also, ignore advice affecting lgroup memory allocation 8078 * if we don't need to do lgroup optimizations on this system 8079 */ 8080 8081 if ((behav == MADV_SEQUENTIAL && 8082 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 8083 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 8084 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 8085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8086 return (0); 8087 } 8088 8089 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 8090 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 8091 /* 8092 * Since we are going to unload hat mappings 8093 * we first have to flush the cache. Otherwise 8094 * this might lead to system panic if another 8095 * thread is doing physio on the range whose 8096 * mappings are unloaded by madvise(3C). 8097 */ 8098 if (svd->softlockcnt > 0) { 8099 /* 8100 * If this is a shared segment, non 0 softlockcnt 8101 * means locked pages are still in use. 8102 */ 8103 if (svd->type == MAP_SHARED) { 8104 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8105 return (EAGAIN); 8106 } 8107 /* 8108 * Since we do have the segvn writers lock 8109 * nobody can fill the cache with entries 8110 * belonging to this seg during the purge. 8111 * The flush either succeeds or we still 8112 * have pending I/Os. In the latter case, 8113 * madvise(3C) fails. 8114 */ 8115 segvn_purge(seg); 8116 if (svd->softlockcnt > 0) { 8117 /* 8118 * Since madvise(3C) is advisory and 8119 * it's not part of UNIX98, madvise(3C) 8120 * failure here doesn't cause any hardship. 8121 * Note that we don't block in "as" layer. 8122 */ 8123 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8124 return (EAGAIN); 8125 } 8126 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 8127 svd->amp->a_softlockcnt > 0) { 8128 /* 8129 * Try to purge this amp's entries from pcache. It 8130 * will succeed only if other segments that share the 8131 * amp have no outstanding softlock's. 8132 */ 8133 segvn_purge(seg); 8134 } 8135 } 8136 8137 amp = svd->amp; 8138 vp = svd->vp; 8139 if (behav == MADV_FREE) { 8140 /* 8141 * MADV_FREE is not supported for segments with 8142 * underlying object; if anonmap is NULL, anon slots 8143 * are not yet populated and there is nothing for 8144 * us to do. As MADV_FREE is advisory, we don't 8145 * return error in either case. 8146 */ 8147 if (vp != NULL || amp == NULL) { 8148 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8149 return (0); 8150 } 8151 8152 segvn_purge(seg); 8153 8154 page = seg_page(seg, addr); 8155 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8156 anon_disclaim(amp, svd->anon_index + page, len); 8157 ANON_LOCK_EXIT(&amp->a_rwlock); 8158 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8159 return (0); 8160 } 8161 8162 /* 8163 * If advice is to be applied to entire segment, 8164 * use advice field in seg_data structure 8165 * otherwise use appropriate vpage entry.
8166 */ 8167 if ((addr == seg->s_base) && (len == seg->s_size)) { 8168 switch (behav) { 8169 case MADV_ACCESS_LWP: 8170 case MADV_ACCESS_MANY: 8171 case MADV_ACCESS_DEFAULT: 8172 /* 8173 * Set memory allocation policy for this segment 8174 */ 8175 policy = lgrp_madv_to_policy(behav, len, svd->type); 8176 if (svd->type == MAP_SHARED) 8177 already_set = lgrp_shm_policy_set(policy, amp, 8178 svd->anon_index, vp, svd->offset, len); 8179 else { 8180 /* 8181 * For private memory, need writers lock on 8182 * address space because the segment may be 8183 * split or concatenated when changing policy 8184 */ 8185 if (AS_READ_HELD(seg->s_as, 8186 &seg->s_as->a_lock)) { 8187 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8188 return (IE_RETRY); 8189 } 8190 8191 already_set = lgrp_privm_policy_set(policy, 8192 &svd->policy_info, len); 8193 } 8194 8195 /* 8196 * If policy set already and it shouldn't be reapplied, 8197 * don't do anything. 8198 */ 8199 if (already_set && 8200 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8201 break; 8202 8203 /* 8204 * Mark any existing pages in given range for 8205 * migration 8206 */ 8207 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8208 vp, svd->offset, 1); 8209 8210 /* 8211 * If same policy set already or this is a shared 8212 * memory segment, don't need to try to concatenate 8213 * segment with adjacent ones. 8214 */ 8215 if (already_set || svd->type == MAP_SHARED) 8216 break; 8217 8218 /* 8219 * Try to concatenate this segment with previous 8220 * one and next one, since we changed policy for 8221 * this one and it may be compatible with adjacent 8222 * ones now. 8223 */ 8224 prev = AS_SEGPREV(seg->s_as, seg); 8225 next = AS_SEGNEXT(seg->s_as, seg); 8226 8227 if (next && next->s_ops == &segvn_ops && 8228 addr + len == next->s_base) 8229 (void) segvn_concat(seg, next, 1); 8230 8231 if (prev && prev->s_ops == &segvn_ops && 8232 addr == prev->s_base + prev->s_size) { 8233 /* 8234 * Drop lock for private data of current 8235 * segment before concatenating (deleting) it 8236 * and return IE_REATTACH to tell as_ctl() that 8237 * current segment has changed 8238 */ 8239 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8240 if (!segvn_concat(prev, seg, 1)) 8241 err = IE_REATTACH; 8242 8243 return (err); 8244 } 8245 break; 8246 8247 case MADV_SEQUENTIAL: 8248 /* 8249 * unloading mapping guarantees 8250 * detection in segvn_fault 8251 */ 8252 ASSERT(seg->s_szc == 0); 8253 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8254 hat_unload(seg->s_as->a_hat, addr, len, 8255 HAT_UNLOAD); 8256 /* FALLTHROUGH */ 8257 case MADV_NORMAL: 8258 case MADV_RANDOM: 8259 svd->advice = (uchar_t)behav; 8260 svd->pageadvice = 0; 8261 break; 8262 case MADV_WILLNEED: /* handled in memcntl */ 8263 case MADV_DONTNEED: /* handled in memcntl */ 8264 case MADV_FREE: /* handled above */ 8265 break; 8266 default: 8267 err = EINVAL; 8268 } 8269 } else { 8270 caddr_t eaddr; 8271 struct seg *new_seg; 8272 struct segvn_data *new_svd; 8273 u_offset_t off; 8274 caddr_t oldeaddr; 8275 8276 page = seg_page(seg, addr); 8277 8278 segvn_vpage(seg); 8279 if (svd->vpage == NULL) { 8280 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8281 return (ENOMEM); 8282 } 8283 8284 switch (behav) { 8285 struct vpage *bvpp, *evpp; 8286 8287 case MADV_ACCESS_LWP: 8288 case MADV_ACCESS_MANY: 8289 case MADV_ACCESS_DEFAULT: 8290 /* 8291 * Set memory allocation policy for portion of this 8292 * segment 8293 */ 8294 8295 /* 8296 * Align address and length of advice to page 8297 * boundaries for large pages 8298 */ 8299 if (seg->s_szc != 0) { 8300 
size_t pgsz; 8301 8302 pgsz = page_get_pagesize(seg->s_szc); 8303 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8304 len = P2ROUNDUP(len, pgsz); 8305 } 8306 8307 /* 8308 * Check to see whether policy is set already 8309 */ 8310 policy = lgrp_madv_to_policy(behav, len, svd->type); 8311 8312 anon_index = svd->anon_index + page; 8313 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8314 8315 if (svd->type == MAP_SHARED) 8316 already_set = lgrp_shm_policy_set(policy, amp, 8317 anon_index, vp, off, len); 8318 else 8319 already_set = 8320 (policy == svd->policy_info.mem_policy); 8321 8322 /* 8323 * If policy set already and it shouldn't be reapplied, 8324 * don't do anything. 8325 */ 8326 if (already_set && 8327 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8328 break; 8329 8330 /* 8331 * For private memory, need writers lock on 8332 * address space because the segment may be 8333 * split or concatenated when changing policy 8334 */ 8335 if (svd->type == MAP_PRIVATE && 8336 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8337 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8338 return (IE_RETRY); 8339 } 8340 8341 /* 8342 * Mark any existing pages in given range for 8343 * migration 8344 */ 8345 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8346 vp, svd->offset, 1); 8347 8348 /* 8349 * Don't need to try to split or concatenate 8350 * segments, since policy is same or this is a shared 8351 * memory segment 8352 */ 8353 if (already_set || svd->type == MAP_SHARED) 8354 break; 8355 8356 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8357 ASSERT(svd->amp == NULL); 8358 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8359 ASSERT(svd->softlockcnt == 0); 8360 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8361 HAT_REGION_TEXT); 8362 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8363 } 8364 8365 /* 8366 * Split off new segment if advice only applies to a 8367 * portion of existing segment starting in middle 8368 */ 8369 new_seg = NULL; 8370 eaddr = addr + len; 8371 oldeaddr = seg->s_base + seg->s_size; 8372 if (addr > seg->s_base) { 8373 /* 8374 * Must flush I/O page cache 8375 * before splitting segment 8376 */ 8377 if (svd->softlockcnt > 0) 8378 segvn_purge(seg); 8379 8380 /* 8381 * Split segment and return IE_REATTACH to tell 8382 * as_ctl() that current segment changed 8383 */ 8384 new_seg = segvn_split_seg(seg, addr); 8385 new_svd = (struct segvn_data *)new_seg->s_data; 8386 err = IE_REATTACH; 8387 8388 /* 8389 * If new segment ends where old one 8390 * did, try to concatenate the new 8391 * segment with next one. 8392 */ 8393 if (eaddr == oldeaddr) { 8394 /* 8395 * Set policy for new segment 8396 */ 8397 (void) lgrp_privm_policy_set(policy, 8398 &new_svd->policy_info, 8399 new_seg->s_size); 8400 8401 next = AS_SEGNEXT(new_seg->s_as, 8402 new_seg); 8403 8404 if (next && 8405 next->s_ops == &segvn_ops && 8406 eaddr == next->s_base) 8407 (void) segvn_concat(new_seg, 8408 next, 1); 8409 } 8410 } 8411 8412 /* 8413 * Split off end of existing segment if advice only 8414 * applies to a portion of segment ending before 8415 * end of the existing segment 8416 */ 8417 if (eaddr < oldeaddr) { 8418 /* 8419 * Must flush I/O page cache 8420 * before splitting segment 8421 */ 8422 if (svd->softlockcnt > 0) 8423 segvn_purge(seg); 8424 8425 /* 8426 * If beginning of old segment was already 8427 * split off, use new segment to split end off 8428 * from. 
8429 */ 8430 if (new_seg != NULL && new_seg != seg) { 8431 /* 8432 * Split segment 8433 */ 8434 (void) segvn_split_seg(new_seg, eaddr); 8435 8436 /* 8437 * Set policy for new segment 8438 */ 8439 (void) lgrp_privm_policy_set(policy, 8440 &new_svd->policy_info, 8441 new_seg->s_size); 8442 } else { 8443 /* 8444 * Split segment and return IE_REATTACH 8445 * to tell as_ctl() that current 8446 * segment changed 8447 */ 8448 (void) segvn_split_seg(seg, eaddr); 8449 err = IE_REATTACH; 8450 8451 (void) lgrp_privm_policy_set(policy, 8452 &svd->policy_info, seg->s_size); 8453 8454 /* 8455 * If new segment starts where old one 8456 * did, try to concatenate it with 8457 * previous segment. 8458 */ 8459 if (addr == seg->s_base) { 8460 prev = AS_SEGPREV(seg->s_as, 8461 seg); 8462 8463 /* 8464 * Drop lock for private data 8465 * of current segment before 8466 * concatenating (deleting) it 8467 */ 8468 if (prev && 8469 prev->s_ops == 8470 &segvn_ops && 8471 addr == prev->s_base + 8472 prev->s_size) { 8473 SEGVN_LOCK_EXIT( 8474 seg->s_as, 8475 &svd->lock); 8476 (void) segvn_concat( 8477 prev, seg, 1); 8478 return (err); 8479 } 8480 } 8481 } 8482 } 8483 break; 8484 case MADV_SEQUENTIAL: 8485 ASSERT(seg->s_szc == 0); 8486 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8487 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8488 /* FALLTHROUGH */ 8489 case MADV_NORMAL: 8490 case MADV_RANDOM: 8491 bvpp = &svd->vpage[page]; 8492 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8493 for (; bvpp < evpp; bvpp++) 8494 VPP_SETADVICE(bvpp, behav); 8495 svd->advice = MADV_NORMAL; 8496 break; 8497 case MADV_WILLNEED: /* handled in memcntl */ 8498 case MADV_DONTNEED: /* handled in memcntl */ 8499 case MADV_FREE: /* handled above */ 8500 break; 8501 default: 8502 err = EINVAL; 8503 } 8504 } 8505 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8506 return (err); 8507 } 8508 8509 /* 8510 * Create a vpage structure for this seg. 8511 */ 8512 static void 8513 segvn_vpage(struct seg *seg) 8514 { 8515 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8516 struct vpage *vp, *evp; 8517 static pgcnt_t page_limit = 0; 8518 8519 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8520 8521 /* 8522 * If no vpage structure exists, allocate one. Copy the protections 8523 * and the advice from the segment itself to the individual pages. 8524 */ 8525 if (svd->vpage == NULL) { 8526 /* 8527 * Start by calculating the number of pages we must allocate to 8528 * track the per-page vpage structs needs for this entire 8529 * segment. If we know now that it will require more than our 8530 * heuristic for the maximum amount of kmem we can consume then 8531 * fail. We do this here, instead of trying to detect this deep 8532 * in page_resv and propagating the error up, since the entire 8533 * memory allocation stack is not amenable to passing this 8534 * back. Instead, it wants to keep trying. 8535 * 8536 * As a heuristic we set a page limit of 5/8s of total_pages 8537 * for this allocation. We use shifts so that no floating 8538 * point conversion takes place and only need to do the 8539 * calculation once. 
8540 */ 8541 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage); 8542 pgcnt_t npages = mem_needed >> PAGESHIFT; 8543 8544 if (page_limit == 0) 8545 page_limit = (total_pages >> 1) + (total_pages >> 3); 8546 8547 if (npages > page_limit) 8548 return; 8549 8550 svd->pageadvice = 1; 8551 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP); 8552 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8553 for (vp = svd->vpage; vp < evp; vp++) { 8554 VPP_SETPROT(vp, svd->prot); 8555 VPP_SETADVICE(vp, svd->advice); 8556 } 8557 } 8558 } 8559 8560 /* 8561 * Dump the pages belonging to this segvn segment. 8562 */ 8563 static void 8564 segvn_dump(struct seg *seg) 8565 { 8566 struct segvn_data *svd; 8567 page_t *pp; 8568 struct anon_map *amp; 8569 ulong_t anon_index; 8570 struct vnode *vp; 8571 u_offset_t off, offset; 8572 pfn_t pfn; 8573 pgcnt_t page, npages; 8574 caddr_t addr; 8575 8576 npages = seg_pages(seg); 8577 svd = (struct segvn_data *)seg->s_data; 8578 vp = svd->vp; 8579 off = offset = svd->offset; 8580 addr = seg->s_base; 8581 8582 if ((amp = svd->amp) != NULL) { 8583 anon_index = svd->anon_index; 8584 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8585 } 8586 8587 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8588 struct anon *ap; 8589 int we_own_it = 0; 8590 8591 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8592 swap_xlate_nopanic(ap, &vp, &off); 8593 } else { 8594 vp = svd->vp; 8595 off = offset; 8596 } 8597 8598 /* 8599 * If pp == NULL, the page either does not exist 8600 * or is exclusively locked. So determine if it 8601 * exists before searching for it. 8602 */ 8603 8604 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8605 we_own_it = 1; 8606 else 8607 pp = page_exists(vp, off); 8608 8609 if (pp) { 8610 pfn = page_pptonum(pp); 8611 dump_addpage(seg->s_as, addr, pfn); 8612 if (we_own_it) 8613 page_unlock(pp); 8614 } 8615 addr += PAGESIZE; 8616 dump_timeleft = dump_timeout; 8617 } 8618 8619 if (amp != NULL) 8620 ANON_LOCK_EXIT(&amp->a_rwlock); 8621 } 8622 8623 #ifdef DEBUG 8624 static uint32_t segvn_pglock_mtbf = 0; 8625 #endif 8626 8627 #define PCACHE_SHWLIST ((page_t *)-2) 8628 #define NOPCACHE_SHWLIST ((page_t *)-1) 8629 8630 /* 8631 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8632 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8633 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8634 * the same parts of the segment. Currently shadow list creation is only 8635 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are 8636 * tagged with segment pointer, starting virtual address and length. This 8637 * approach for MAP_SHARED segments may add many pcache entries for the same 8638 * set of pages and lead to long hash chains that decrease pcache lookup 8639 * performance. To avoid this issue for shared segments shared anon map and 8640 * starting anon index are used for pcache entry tagging. This allows all 8641 * segments to share pcache entries for the same anon range and reduces pcache 8642 * chain's length as well as memory overhead from duplicate shadow lists and 8643 * pcache entries. 8644 * 8645 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8646 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8647 * part of softlockcnt accounting is done differently for private and shared 8648 * segments.
In private segment case softlock is only incremented when a new 8649 * shadow list is created but not when an existing one is found via 8650 * seg_plookup(). pcache entries have reference count incremented/decremented 8651 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8652 * reference count can be purged (and purging is needed before segment can be 8653 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8654 * decrement softlockcnt. Since in private segment case each of its pcache 8655 * entries only belongs to this segment we can expect that when 8656 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8657 * segment purge will succeed and softlockcnt will drop to 0. In shared 8658 * segment case reference count in pcache entry counts active locks from many 8659 * different segments so we can't expect segment purging to succeed even when 8660 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8661 * segment. To be able to determine when there're no pending pagelocks in 8662 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8663 * but instead softlockcnt is incremented and decremented for every 8664 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8665 * list was created or an existing one was found. When softlockcnt drops to 0 8666 * this segment no longer has any claims for pcached shadow lists and the 8667 * segment can be freed even if there're still active pcache entries 8668 * shared by this segment anon map. Shared segment pcache entries belong to 8669 * anon map and are typically removed when anon map is freed after all 8670 * processes destroy the segments that use this anon map. 8671 */ 8672 static int 8673 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8674 enum lock_type type, enum seg_rw rw) 8675 { 8676 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8677 size_t np; 8678 pgcnt_t adjustpages; 8679 pgcnt_t npages; 8680 ulong_t anon_index; 8681 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE; 8682 uint_t error; 8683 struct anon_map *amp; 8684 pgcnt_t anpgcnt; 8685 struct page **pplist, **pl, *pp; 8686 caddr_t a; 8687 size_t page; 8688 caddr_t lpgaddr, lpgeaddr; 8689 anon_sync_obj_t cookie; 8690 int anlock; 8691 struct anon_map *pamp; 8692 caddr_t paddr; 8693 seg_preclaim_cbfunc_t preclaim_callback; 8694 size_t pgsz; 8695 int use_pcache; 8696 size_t wlen; 8697 uint_t pflags = 0; 8698 int sftlck_sbase = 0; 8699 int sftlck_send = 0; 8700 8701 #ifdef DEBUG 8702 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8703 hrtime_t ts = gethrtime(); 8704 if ((ts % segvn_pglock_mtbf) == 0) { 8705 return (ENOTSUP); 8706 } 8707 if ((ts % segvn_pglock_mtbf) == 1) { 8708 return (EFAULT); 8709 } 8710 } 8711 #endif 8712 8713 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8714 "segvn_pagelock: start seg %p addr %p", seg, addr); 8715 8716 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8717 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8718 8719 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8720 8721 /* 8722 * for now we only support pagelock to anon memory. We would have to 8723 * check protections for vnode objects and call into the vnode driver. 8724 * That's too much for a fast path. Let the fault entry point handle 8725 * it. 
8726 */ 8727 if (svd->vp != NULL) { 8728 if (type == L_PAGELOCK) { 8729 error = ENOTSUP; 8730 goto out; 8731 } 8732 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8733 } 8734 if ((amp = svd->amp) == NULL) { 8735 if (type == L_PAGELOCK) { 8736 error = EFAULT; 8737 goto out; 8738 } 8739 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8740 } 8741 if (rw != S_READ && rw != S_WRITE) { 8742 if (type == L_PAGELOCK) { 8743 error = ENOTSUP; 8744 goto out; 8745 } 8746 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8747 } 8748 8749 if (seg->s_szc != 0) { 8750 /* 8751 * We are adjusting the pagelock region to the large page size 8752 * boundary because the unlocked part of a large page cannot 8753 * be freed anyway unless all constituent pages of a large 8754 * page are locked. Bigger regions reduce pcache chain length 8755 * and improve lookup performance. The tradeoff is that the 8756 * very first segvn_pagelock() call for a given page is more 8757 * expensive if only 1 page_t is needed for IO. This is only 8758 * an issue if pcache entry doesn't get reused by several 8759 * subsequent calls. We optimize here for the case when pcache 8760 * is heavily used by repeated IOs to the same address range. 8761 * 8762 * Note segment's page size cannot change while we are holding 8763 * as lock. And then it cannot change while softlockcnt is 8764 * not 0. This will allow us to correctly recalculate large 8765 * page size region for the matching pageunlock/reclaim call 8766 * since as_pageunlock() caller must always match 8767 * as_pagelock() call's addr and len. 8768 * 8769 * For pageunlock *ppp points to the pointer of page_t that 8770 * corresponds to the real unadjusted start address. Similar 8771 * for pagelock *ppp must point to the pointer of page_t that 8772 * corresponds to the real unadjusted start address. 8773 */ 8774 pgsz = page_get_pagesize(seg->s_szc); 8775 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8776 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8777 } else if (len < segvn_pglock_comb_thrshld) { 8778 lpgaddr = addr; 8779 lpgeaddr = addr + len; 8780 adjustpages = 0; 8781 pgsz = PAGESIZE; 8782 } else { 8783 /* 8784 * Align the address range of large enough requests to allow 8785 * combining of different shadow lists into 1 to reduce memory 8786 * overhead from potentially overlapping large shadow lists 8787 * (worst case is we have a 1MB IO into buffers with start 8788 * addresses separated by 4K). Alignment is only possible if 8789 * padded chunks have sufficient access permissions. Note 8790 * permissions won't change between L_PAGELOCK and 8791 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8792 * segvn_setprot() to wait until softlockcnt drops to 0. This 8793 * allows us to determine in L_PAGEUNLOCK the same range we 8794 * computed in L_PAGELOCK. 8795 * 8796 * If alignment is limited by segment ends set 8797 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8798 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8799 * per segment counters. In L_PAGEUNLOCK case decrease 8800 * softlockcnt_sbase/softlockcnt_send counters if 8801 * sftlck_sbase/sftlck_send flags are set. When 8802 * softlockcnt_sbase/softlockcnt_send are non 0 8803 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8804 * won't merge the segments. 
This restriction combined with 8805 * restriction on segment unmapping and splitting for segments 8806 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8807 * correctly determine the same range that was previously 8808 * locked by matching L_PAGELOCK. 8809 */ 8810 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8811 pgsz = PAGESIZE; 8812 if (svd->type == MAP_PRIVATE) { 8813 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8814 segvn_pglock_comb_balign); 8815 if (lpgaddr < seg->s_base) { 8816 lpgaddr = seg->s_base; 8817 sftlck_sbase = 1; 8818 } 8819 } else { 8820 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8821 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8822 if (aaix < svd->anon_index) { 8823 lpgaddr = seg->s_base; 8824 sftlck_sbase = 1; 8825 } else { 8826 lpgaddr = addr - ptob(aix - aaix); 8827 ASSERT(lpgaddr >= seg->s_base); 8828 } 8829 } 8830 if (svd->pageprot && lpgaddr != addr) { 8831 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8832 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8833 while (vp < evp) { 8834 if ((VPP_PROT(vp) & protchk) == 0) { 8835 break; 8836 } 8837 vp++; 8838 } 8839 if (vp < evp) { 8840 lpgaddr = addr; 8841 pflags = 0; 8842 } 8843 } 8844 lpgeaddr = addr + len; 8845 if (pflags) { 8846 if (svd->type == MAP_PRIVATE) { 8847 lpgeaddr = (caddr_t)P2ROUNDUP( 8848 (uintptr_t)lpgeaddr, 8849 segvn_pglock_comb_balign); 8850 } else { 8851 ulong_t aix = svd->anon_index + 8852 seg_page(seg, lpgeaddr); 8853 ulong_t aaix = P2ROUNDUP(aix, 8854 segvn_pglock_comb_palign); 8855 if (aaix < aix) { 8856 lpgeaddr = 0; 8857 } else { 8858 lpgeaddr += ptob(aaix - aix); 8859 } 8860 } 8861 if (lpgeaddr == 0 || 8862 lpgeaddr > seg->s_base + seg->s_size) { 8863 lpgeaddr = seg->s_base + seg->s_size; 8864 sftlck_send = 1; 8865 } 8866 } 8867 if (svd->pageprot && lpgeaddr != addr + len) { 8868 struct vpage *vp; 8869 struct vpage *evp; 8870 8871 vp = &svd->vpage[seg_page(seg, addr + len)]; 8872 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8873 8874 while (vp < evp) { 8875 if ((VPP_PROT(vp) & protchk) == 0) { 8876 break; 8877 } 8878 vp++; 8879 } 8880 if (vp < evp) { 8881 lpgeaddr = addr + len; 8882 } 8883 } 8884 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8885 } 8886 8887 /* 8888 * For MAP_SHARED segments we create pcache entries tagged by amp and 8889 * anon index so that we can share pcache entries with other segments 8890 * that map this amp. For private segments pcache entries are tagged 8891 * with segment and virtual address. 8892 */ 8893 if (svd->type == MAP_SHARED) { 8894 pamp = amp; 8895 paddr = (caddr_t)((lpgaddr - seg->s_base) + 8896 ptob(svd->anon_index)); 8897 preclaim_callback = shamp_reclaim; 8898 } else { 8899 pamp = NULL; 8900 paddr = lpgaddr; 8901 preclaim_callback = segvn_reclaim; 8902 } 8903 8904 if (type == L_PAGEUNLOCK) { 8905 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8906 8907 /* 8908 * update hat ref bits for /proc. We need to make sure 8909 * that threads tracing the ref and mod bits of the 8910 * address space get the right data. 8911 * Note: page ref and mod bits are updated at reclaim time 8912 */ 8913 if (seg->s_as->a_vbits) { 8914 for (a = addr; a < addr + len; a += PAGESIZE) { 8915 if (rw == S_WRITE) { 8916 hat_setstat(seg->s_as, a, 8917 PAGESIZE, P_REF | P_MOD); 8918 } else { 8919 hat_setstat(seg->s_as, a, 8920 PAGESIZE, P_REF); 8921 } 8922 } 8923 } 8924 8925 /* 8926 * Check the shadow list entry after the last page used in 8927 * this IO request. 
If it's NOPCACHE_SHWLIST the shadow list 8928 * was not inserted into pcache and is not large page 8929 * adjusted. In this case call reclaim callback directly and 8930 * don't adjust the shadow list start and size for large 8931 * pages. 8932 */ 8933 npages = btop(len); 8934 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 8935 void *ptag; 8936 if (pamp != NULL) { 8937 ASSERT(svd->type == MAP_SHARED); 8938 ptag = (void *)pamp; 8939 paddr = (caddr_t)((addr - seg->s_base) + 8940 ptob(svd->anon_index)); 8941 } else { 8942 ptag = (void *)seg; 8943 paddr = addr; 8944 } 8945 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 8946 } else { 8947 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 8948 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 8949 len = lpgeaddr - lpgaddr; 8950 npages = btop(len); 8951 seg_pinactive(seg, pamp, paddr, len, 8952 *ppp - adjustpages, rw, pflags, preclaim_callback); 8953 } 8954 8955 if (pamp != NULL) { 8956 ASSERT(svd->type == MAP_SHARED); 8957 ASSERT(svd->softlockcnt >= npages); 8958 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 8959 } 8960 8961 if (sftlck_sbase) { 8962 ASSERT(svd->softlockcnt_sbase > 0); 8963 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase); 8964 } 8965 if (sftlck_send) { 8966 ASSERT(svd->softlockcnt_send > 0); 8967 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send); 8968 } 8969 8970 /* 8971 * If someone is blocked while unmapping, we purge 8972 * segment page cache and thus reclaim pplist synchronously 8973 * without waiting for seg_pasync_thread. This speeds up 8974 * unmapping in cases where munmap(2) is called, while 8975 * raw async i/o is still in progress or where a thread 8976 * exits on data fault in a multithreaded application. 8977 */ 8978 if (AS_ISUNMAPWAIT(seg->s_as)) { 8979 if (svd->softlockcnt == 0) { 8980 mutex_enter(&seg->s_as->a_contents); 8981 if (AS_ISUNMAPWAIT(seg->s_as)) { 8982 AS_CLRUNMAPWAIT(seg->s_as); 8983 cv_broadcast(&seg->s_as->a_cv); 8984 } 8985 mutex_exit(&seg->s_as->a_contents); 8986 } else if (pamp == NULL) { 8987 /* 8988 * softlockcnt is not 0 and this is a 8989 * MAP_PRIVATE segment. Try to purge its 8990 * pcache entries to reduce softlockcnt. 8991 * If it drops to 0 segvn_reclaim() 8992 * will wake up a thread waiting on 8993 * unmapwait flag. 8994 * 8995 * We don't purge MAP_SHARED segments with non 8996 * 0 softlockcnt since IO is still in progress 8997 * for such segments. 8998 */ 8999 ASSERT(svd->type == MAP_PRIVATE); 9000 segvn_purge(seg); 9001 } 9002 } 9003 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9004 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 9005 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 9006 return (0); 9007 } 9008 9009 /* The L_PAGELOCK case ... */ 9010 9011 VM_STAT_ADD(segvnvmstats.pagelock[1]); 9012 9013 /* 9014 * For MAP_SHARED segments we have to check protections before 9015 * seg_plookup() since pcache entries may be shared by many segments 9016 * with potentially different page protections. 
9017 */ 9018 if (pamp != NULL) { 9019 ASSERT(svd->type == MAP_SHARED); 9020 if (svd->pageprot == 0) { 9021 if ((svd->prot & protchk) == 0) { 9022 error = EACCES; 9023 goto out; 9024 } 9025 } else { 9026 /* 9027 * check page protections 9028 */ 9029 caddr_t ea; 9030 9031 if (seg->s_szc) { 9032 a = lpgaddr; 9033 ea = lpgeaddr; 9034 } else { 9035 a = addr; 9036 ea = addr + len; 9037 } 9038 for (; a < ea; a += pgsz) { 9039 struct vpage *vp; 9040 9041 ASSERT(seg->s_szc == 0 || 9042 sameprot(seg, a, pgsz)); 9043 vp = &svd->vpage[seg_page(seg, a)]; 9044 if ((VPP_PROT(vp) & protchk) == 0) { 9045 error = EACCES; 9046 goto out; 9047 } 9048 } 9049 } 9050 } 9051 9052 /* 9053 * try to find pages in segment page cache 9054 */ 9055 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 9056 if (pplist != NULL) { 9057 if (pamp != NULL) { 9058 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 9059 ASSERT(svd->type == MAP_SHARED); 9060 atomic_add_long((ulong_t *)&svd->softlockcnt, 9061 npages); 9062 } 9063 if (sftlck_sbase) { 9064 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9065 } 9066 if (sftlck_send) { 9067 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9068 } 9069 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9070 *ppp = pplist + adjustpages; 9071 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 9072 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 9073 return (0); 9074 } 9075 9076 /* 9077 * For MAP_SHARED segments we already verified above that segment 9078 * protections allow this pagelock operation. 9079 */ 9080 if (pamp == NULL) { 9081 ASSERT(svd->type == MAP_PRIVATE); 9082 if (svd->pageprot == 0) { 9083 if ((svd->prot & protchk) == 0) { 9084 error = EACCES; 9085 goto out; 9086 } 9087 if (svd->prot & PROT_WRITE) { 9088 wlen = lpgeaddr - lpgaddr; 9089 } else { 9090 wlen = 0; 9091 ASSERT(rw == S_READ); 9092 } 9093 } else { 9094 int wcont = 1; 9095 /* 9096 * check page protections 9097 */ 9098 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 9099 struct vpage *vp; 9100 9101 ASSERT(seg->s_szc == 0 || 9102 sameprot(seg, a, pgsz)); 9103 vp = &svd->vpage[seg_page(seg, a)]; 9104 if ((VPP_PROT(vp) & protchk) == 0) { 9105 error = EACCES; 9106 goto out; 9107 } 9108 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 9109 wlen += pgsz; 9110 } else { 9111 wcont = 0; 9112 ASSERT(rw == S_READ); 9113 } 9114 } 9115 } 9116 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 9117 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9118 } 9119 9120 /* 9121 * Only build large page adjusted shadow list if we expect to insert 9122 * it into pcache. For large enough pages it's a big overhead to 9123 * create a shadow list of the entire large page. But this overhead 9124 * should be amortized over repeated pcache hits on subsequent reuse 9125 * of this shadow list (IO into any range within this shadow list will 9126 * find it in pcache since we large page align the request for pcache 9127 * lookups). pcache performance is improved with bigger shadow lists 9128 * as it reduces the time to pcache the entire big segment and reduces 9129 * pcache chain length. 9130 */ 9131 if (seg_pinsert_check(seg, pamp, paddr, 9132 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9133 addr = lpgaddr; 9134 len = lpgeaddr - lpgaddr; 9135 use_pcache = 1; 9136 } else { 9137 use_pcache = 0; 9138 /* 9139 * Since this entry will not be inserted into the pcache, we 9140 * will not do any adjustments to the starting address or 9141 * size of the memory to be locked. 
9142 */ 9143 adjustpages = 0; 9144 } 9145 npages = btop(len); 9146 9147 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9148 pl = pplist; 9149 *ppp = pplist + adjustpages; 9150 /* 9151 * If use_pcache is 0 this shadow list is not large page adjusted. 9152 * Record this info in the last entry of shadow array so that 9153 * L_PAGEUNLOCK can determine if it should large page adjust the 9154 * address range to find the real range that was locked. 9155 */ 9156 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9157 9158 page = seg_page(seg, addr); 9159 anon_index = svd->anon_index + page; 9160 9161 anlock = 0; 9162 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9163 ASSERT(amp->a_szc >= seg->s_szc); 9164 anpgcnt = page_get_pagecnt(amp->a_szc); 9165 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9166 struct anon *ap; 9167 struct vnode *vp; 9168 u_offset_t off; 9169 9170 /* 9171 * Lock and unlock anon array only once per large page. 9172 * anon_array_enter() locks the root anon slot according to 9173 * a_szc which can't change while anon map is locked. We lock 9174 * anon the first time through this loop and each time we 9175 * reach anon index that corresponds to a root of a large 9176 * page. 9177 */ 9178 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9179 ASSERT(anlock == 0); 9180 anon_array_enter(amp, anon_index, &cookie); 9181 anlock = 1; 9182 } 9183 ap = anon_get_ptr(amp->ahp, anon_index); 9184 9185 /* 9186 * We must never use seg_pcache for COW pages 9187 * because we might end up with original page still 9188 * lying in seg_pcache even after private page is 9189 * created. This leads to data corruption as 9190 * aio_write refers to the page still in cache 9191 * while all other accesses refer to the private 9192 * page. 9193 */ 9194 if (ap == NULL || ap->an_refcnt != 1) { 9195 struct vpage *vpage; 9196 9197 if (seg->s_szc) { 9198 error = EFAULT; 9199 break; 9200 } 9201 if (svd->vpage != NULL) { 9202 vpage = &svd->vpage[seg_page(seg, a)]; 9203 } else { 9204 vpage = NULL; 9205 } 9206 ASSERT(anlock); 9207 anon_array_exit(&cookie); 9208 anlock = 0; 9209 pp = NULL; 9210 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9211 vpage, &pp, 0, F_INVAL, rw, 1); 9212 if (error) { 9213 error = fc_decode(error); 9214 break; 9215 } 9216 anon_array_enter(amp, anon_index, &cookie); 9217 anlock = 1; 9218 ap = anon_get_ptr(amp->ahp, anon_index); 9219 if (ap == NULL || ap->an_refcnt != 1) { 9220 error = EFAULT; 9221 break; 9222 } 9223 } 9224 swap_xlate(ap, &vp, &off); 9225 pp = page_lookup_nowait(vp, off, SE_SHARED); 9226 if (pp == NULL) { 9227 error = EFAULT; 9228 break; 9229 } 9230 if (ap->an_pvp != NULL) { 9231 anon_swap_free(ap, pp); 9232 } 9233 /* 9234 * Unlock anon if this is the last slot in a large page.
9235 */ 9236 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9237 ASSERT(anlock); 9238 anon_array_exit(&cookie); 9239 anlock = 0; 9240 } 9241 *pplist++ = pp; 9242 } 9243 if (anlock) { /* Ensure the lock is dropped */ 9244 anon_array_exit(&cookie); 9245 } 9246 ANON_LOCK_EXIT(&amp->a_rwlock); 9247 9248 if (a >= addr + len) { 9249 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9250 if (pamp != NULL) { 9251 ASSERT(svd->type == MAP_SHARED); 9252 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9253 npages); 9254 wlen = len; 9255 } 9256 if (sftlck_sbase) { 9257 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9258 } 9259 if (sftlck_send) { 9260 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9261 } 9262 if (use_pcache) { 9263 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9264 rw, pflags, preclaim_callback); 9265 } 9266 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9267 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9268 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9269 return (0); 9270 } 9271 9272 pplist = pl; 9273 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9274 while (np > (uint_t)0) { 9275 ASSERT(PAGE_LOCKED(*pplist)); 9276 page_unlock(*pplist); 9277 np--; 9278 pplist++; 9279 } 9280 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9281 out: 9282 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9283 *ppp = NULL; 9284 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9285 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9286 return (error); 9287 } 9288 9289 /* 9290 * purge any cached pages in the I/O page cache 9291 */ 9292 static void 9293 segvn_purge(struct seg *seg) 9294 { 9295 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9296 9297 /* 9298 * pcache is only used by pure anon segments. 9299 */ 9300 if (svd->amp == NULL || svd->vp != NULL) { 9301 return; 9302 } 9303 9304 /* 9305 * For MAP_SHARED segments non 0 segment's softlockcnt means 9306 * active IO is still in progress via this segment. So we only 9307 * purge MAP_SHARED segments when their softlockcnt is 0. 9308 */ 9309 if (svd->type == MAP_PRIVATE) { 9310 if (svd->softlockcnt) { 9311 seg_ppurge(seg, NULL, 0); 9312 } 9313 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9314 seg_ppurge(seg, svd->amp, 0); 9315 } 9316 } 9317 9318 /* 9319 * If async argument is not 0 we are called from pcache async thread and don't 9320 * hold AS lock. 9321 */ 9322 9323 /*ARGSUSED*/ 9324 static int 9325 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9326 enum seg_rw rw, int async) 9327 { 9328 struct seg *seg = (struct seg *)ptag; 9329 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9330 pgcnt_t np, npages; 9331 struct page **pl; 9332 9333 npages = np = btop(len); 9334 ASSERT(npages); 9335 9336 ASSERT(svd->vp == NULL && svd->amp != NULL); 9337 ASSERT(svd->softlockcnt >= npages); 9338 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9339 9340 pl = pplist; 9341 9342 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9343 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9344 9345 while (np > (uint_t)0) { 9346 if (rw == S_WRITE) { 9347 hat_setrefmod(*pplist); 9348 } else { 9349 hat_setref(*pplist); 9350 } 9351 page_unlock(*pplist); 9352 np--; 9353 pplist++; 9354 } 9355 9356 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9357 9358 /* 9359 * If we are pcache async thread we don't hold AS lock. This means if 9360 * softlockcnt drops to 0 after the decrement below, the address space may 9361 get freed.
We can't allow it since after softlock decrement to 0 we 9362 * still need to access as structure for possible wakeup of unmap 9363 * waiters. To prevent the disappearance of as we take this segment 9364 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to 9365 * make sure this routine completes before segment is freed. 9366 * 9367 * The second complication we have to deal with in async case is a 9368 * possibility of missed wake up of unmap wait thread. When we don't 9369 * hold as lock here we may take a_contents lock before unmap wait 9370 * thread that was first to see softlockcnt was still not 0. As a 9371 * result we'll fail to wake up an unmap wait thread. To avoid this 9372 * race we set nounmapwait flag in as structure if we drop softlockcnt 9373 * to 0 when we were called by pcache async thread. unmapwait thread 9374 * will not block if this flag is set. 9375 */ 9376 if (async) { 9377 mutex_enter(&svd->segfree_syncmtx); 9378 } 9379 9380 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) { 9381 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 9382 mutex_enter(&seg->s_as->a_contents); 9383 if (async) { 9384 AS_SETNOUNMAPWAIT(seg->s_as); 9385 } 9386 if (AS_ISUNMAPWAIT(seg->s_as)) { 9387 AS_CLRUNMAPWAIT(seg->s_as); 9388 cv_broadcast(&seg->s_as->a_cv); 9389 } 9390 mutex_exit(&seg->s_as->a_contents); 9391 } 9392 } 9393 9394 if (async) { 9395 mutex_exit(&svd->segfree_syncmtx); 9396 } 9397 return (0); 9398 } 9399 9400 /*ARGSUSED*/ 9401 static int 9402 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9403 enum seg_rw rw, int async) 9404 { 9405 amp_t *amp = (amp_t *)ptag; 9406 pgcnt_t np, npages; 9407 struct page **pl; 9408 9409 npages = np = btop(len); 9410 ASSERT(npages); 9411 ASSERT(amp->a_softlockcnt >= npages); 9412 9413 pl = pplist; 9414 9415 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9416 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9417 9418 while (np > (uint_t)0) { 9419 if (rw == S_WRITE) { 9420 hat_setrefmod(*pplist); 9421 } else { 9422 hat_setref(*pplist); 9423 } 9424 page_unlock(*pplist); 9425 np--; 9426 pplist++; 9427 } 9428 9429 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9430 9431 /* 9432 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt 9433 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0 9434 * and anonmap_purge() acquires a_purgemtx. 9435 */ 9436 mutex_enter(&amp->a_purgemtx); 9437 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) && 9438 amp->a_purgewait) { 9439 amp->a_purgewait = 0; 9440 cv_broadcast(&amp->a_purgecv); 9441 } 9442 mutex_exit(&amp->a_purgemtx); 9443 return (0); 9444 } 9445 9446 /* 9447 * get a memory ID for an addr in a given segment 9448 * 9449 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9450 * At fault time they will be relocated into larger pages.
9451 */ 9452 static int 9453 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9454 { 9455 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9456 struct anon *ap = NULL; 9457 ulong_t anon_index; 9458 struct anon_map *amp; 9459 anon_sync_obj_t cookie; 9460 9461 if (svd->type == MAP_PRIVATE) { 9462 memidp->val[0] = (uintptr_t)seg->s_as; 9463 memidp->val[1] = (uintptr_t)addr; 9464 return (0); 9465 } 9466 9467 if (svd->type == MAP_SHARED) { 9468 if (svd->vp) { 9469 memidp->val[0] = (uintptr_t)svd->vp; 9470 memidp->val[1] = (u_longlong_t)svd->offset + 9471 (uintptr_t)(addr - seg->s_base); 9472 return (0); 9473 } else { 9474 9475 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9476 if ((amp = svd->amp) != NULL) { 9477 anon_index = svd->anon_index + 9478 seg_page(seg, addr); 9479 } 9480 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9481 9482 ASSERT(amp != NULL); 9483 9484 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9485 anon_array_enter(amp, anon_index, &cookie); 9486 ap = anon_get_ptr(amp->ahp, anon_index); 9487 if (ap == NULL) { 9488 page_t *pp; 9489 9490 pp = anon_zero(seg, addr, &ap, svd->cred); 9491 if (pp == NULL) { 9492 anon_array_exit(&cookie); 9493 ANON_LOCK_EXIT(&amp->a_rwlock); 9494 return (ENOMEM); 9495 } 9496 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9497 == NULL); 9498 (void) anon_set_ptr(amp->ahp, anon_index, 9499 ap, ANON_SLEEP); 9500 page_unlock(pp); 9501 } 9502 9503 anon_array_exit(&cookie); 9504 ANON_LOCK_EXIT(&amp->a_rwlock); 9505 9506 memidp->val[0] = (uintptr_t)ap; 9507 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9508 return (0); 9509 } 9510 } 9511 return (EINVAL); 9512 } 9513 9514 static int 9515 sameprot(struct seg *seg, caddr_t a, size_t len) 9516 { 9517 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9518 struct vpage *vpage; 9519 spgcnt_t pages = btop(len); 9520 uint_t prot; 9521 9522 if (svd->pageprot == 0) 9523 return (1); 9524 9525 ASSERT(svd->vpage != NULL); 9526 9527 vpage = &svd->vpage[seg_page(seg, a)]; 9528 prot = VPP_PROT(vpage); 9529 vpage++; 9530 pages--; 9531 while (pages-- > 0) { 9532 if (prot != VPP_PROT(vpage)) 9533 return (0); 9534 vpage++; 9535 } 9536 return (1); 9537 } 9538 9539 /* 9540 * Get memory allocation policy info for specified address in given segment 9541 */ 9542 static lgrp_mem_policy_info_t * 9543 segvn_getpolicy(struct seg *seg, caddr_t addr) 9544 { 9545 struct anon_map *amp; 9546 ulong_t anon_index; 9547 lgrp_mem_policy_info_t *policy_info; 9548 struct segvn_data *svn_data; 9549 u_offset_t vn_off; 9550 vnode_t *vp; 9551 9552 ASSERT(seg != NULL); 9553 9554 svn_data = (struct segvn_data *)seg->s_data; 9555 if (svn_data == NULL) 9556 return (NULL); 9557 9558 /* 9559 * Get policy info for private or shared memory 9560 */ 9561 if (svn_data->type != MAP_SHARED) { 9562 if (svn_data->tr_state != SEGVN_TR_ON) { 9563 policy_info = &svn_data->policy_info; 9564 } else { 9565 policy_info = &svn_data->tr_policy_info; 9566 ASSERT(policy_info->mem_policy == 9567 LGRP_MEM_POLICY_NEXT_SEG); 9568 } 9569 } else { 9570 amp = svn_data->amp; 9571 anon_index = svn_data->anon_index + seg_page(seg, addr); 9572 vp = svn_data->vp; 9573 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9574 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9575 } 9576 9577 return (policy_info); 9578 } 9579 9580 /*ARGSUSED*/ 9581 static int 9582 segvn_capable(struct seg *seg, segcapability_t capability) 9583 { 9584 return (0); 9585 } 9586 9587 /* 9588 * Bind text vnode segment to an amp.
If we bind successfully mappings will be 9589 * established to per vnode mapping per lgroup amp pages instead of to vnode 9590 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9591 * may share the same text replication amp. If a suitable amp doesn't already 9592 * exist in svntr hash table create a new one. We may fail to bind to amp if 9593 * segment is not eligible for text replication. Code below first checks for 9594 * these conditions. If binding is successful segment tr_state is set to on 9595 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9596 * svd->amp remains as NULL. 9597 */ 9598 static void 9599 segvn_textrepl(struct seg *seg) 9600 { 9601 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9602 vnode_t *vp = svd->vp; 9603 u_offset_t off = svd->offset; 9604 size_t size = seg->s_size; 9605 u_offset_t eoff = off + size; 9606 uint_t szc = seg->s_szc; 9607 ulong_t hash = SVNTR_HASH_FUNC(vp); 9608 svntr_t *svntrp; 9609 struct vattr va; 9610 proc_t *p = seg->s_as->a_proc; 9611 lgrp_id_t lgrp_id; 9612 lgrp_id_t olid; 9613 int first; 9614 struct anon_map *amp; 9615 9616 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9617 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9618 ASSERT(p != NULL); 9619 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9620 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9621 ASSERT(svd->flags & MAP_TEXT); 9622 ASSERT(svd->type == MAP_PRIVATE); 9623 ASSERT(vp != NULL && svd->amp == NULL); 9624 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9625 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9626 ASSERT(seg->s_as != &kas); 9627 ASSERT(off < eoff); 9628 ASSERT(svntr_hashtab != NULL); 9629 9630 /* 9631 * If numa optimizations are no longer desired bail out. 9632 */ 9633 if (!lgrp_optimizations()) { 9634 svd->tr_state = SEGVN_TR_OFF; 9635 return; 9636 } 9637 9638 /* 9639 * Avoid creating anon maps with size bigger than the file size. 9640 * If VOP_GETATTR() call fails bail out. 9641 */ 9642 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9643 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9644 svd->tr_state = SEGVN_TR_OFF; 9645 SEGVN_TR_ADDSTAT(gaerr); 9646 return; 9647 } 9648 if (btopr(va.va_size) < btopr(eoff)) { 9649 svd->tr_state = SEGVN_TR_OFF; 9650 SEGVN_TR_ADDSTAT(overmap); 9651 return; 9652 } 9653 9654 /* 9655 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9656 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9657 * mapping that checks if trcache for this vnode needs to be 9658 * invalidated can't miss us. 9659 */ 9660 if (!(vp->v_flag & VVMEXEC)) { 9661 mutex_enter(&vp->v_lock); 9662 vp->v_flag |= VVMEXEC; 9663 mutex_exit(&vp->v_lock); 9664 } 9665 mutex_enter(&svntr_hashtab[hash].tr_lock); 9666 /* 9667 * Bail out if potentially MAP_SHARED writable mappings exist to this 9668 * vnode. We don't want to use old file contents from existing 9669 * replicas if this mapping was established after the original file 9670 * was changed. 
9671 */ 9672 if (vn_is_mapped(vp, V_WRITE)) { 9673 mutex_exit(&svntr_hashtab[hash].tr_lock); 9674 svd->tr_state = SEGVN_TR_OFF; 9675 SEGVN_TR_ADDSTAT(wrcnt); 9676 return; 9677 } 9678 svntrp = svntr_hashtab[hash].tr_head; 9679 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9680 ASSERT(svntrp->tr_refcnt != 0); 9681 if (svntrp->tr_vp != vp) { 9682 continue; 9683 } 9684 9685 /* 9686 * Bail out if the file or its attributes were changed after 9687 * this replication entry was created since we need to use the 9688 * latest file contents. Note that mtime test alone is not 9689 * sufficient because a user can explicitly change mtime via 9690 * utimes(2) interfaces back to the old value after modifiying 9691 * the file contents. To detect this case we also have to test 9692 * ctime which among other things records the time of the last 9693 * mtime change by utimes(2). ctime is not changed when the file 9694 * is only read or executed so we expect that typically existing 9695 * replication amp's can be used most of the time. 9696 */ 9697 if (!svntrp->tr_valid || 9698 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9699 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9700 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9701 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9702 mutex_exit(&svntr_hashtab[hash].tr_lock); 9703 svd->tr_state = SEGVN_TR_OFF; 9704 SEGVN_TR_ADDSTAT(stale); 9705 return; 9706 } 9707 /* 9708 * if off, eoff and szc match current segment we found the 9709 * existing entry we can use. 9710 */ 9711 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9712 svntrp->tr_szc == szc) { 9713 break; 9714 } 9715 /* 9716 * Don't create different but overlapping in file offsets 9717 * entries to avoid replication of the same file pages more 9718 * than once per lgroup. 9719 */ 9720 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9721 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9722 mutex_exit(&svntr_hashtab[hash].tr_lock); 9723 svd->tr_state = SEGVN_TR_OFF; 9724 SEGVN_TR_ADDSTAT(overlap); 9725 return; 9726 } 9727 } 9728 /* 9729 * If we didn't find existing entry create a new one. 9730 */ 9731 if (svntrp == NULL) { 9732 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9733 if (svntrp == NULL) { 9734 mutex_exit(&svntr_hashtab[hash].tr_lock); 9735 svd->tr_state = SEGVN_TR_OFF; 9736 SEGVN_TR_ADDSTAT(nokmem); 9737 return; 9738 } 9739 #ifdef DEBUG 9740 { 9741 lgrp_id_t i; 9742 for (i = 0; i < NLGRPS_MAX; i++) { 9743 ASSERT(svntrp->tr_amp[i] == NULL); 9744 } 9745 } 9746 #endif /* DEBUG */ 9747 svntrp->tr_vp = vp; 9748 svntrp->tr_off = off; 9749 svntrp->tr_eoff = eoff; 9750 svntrp->tr_szc = szc; 9751 svntrp->tr_valid = 1; 9752 svntrp->tr_mtime = va.va_mtime; 9753 svntrp->tr_ctime = va.va_ctime; 9754 svntrp->tr_refcnt = 0; 9755 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9756 svntr_hashtab[hash].tr_head = svntrp; 9757 } 9758 first = 1; 9759 again: 9760 /* 9761 * We want to pick a replica with pages on main thread's (t_tid = 1, 9762 * aka T1) lgrp. Currently text replication is only optimized for 9763 * workloads that either have all threads of a process on the same 9764 * lgrp or execute their large text primarily on main thread. 9765 */ 9766 lgrp_id = p->p_t1_lgrpid; 9767 if (lgrp_id == LGRP_NONE) { 9768 /* 9769 * In case exec() prefaults text on non main thread use 9770 * current thread lgrpid. It will become main thread anyway 9771 * soon. 9772 */ 9773 lgrp_id = lgrp_home_id(curthread); 9774 } 9775 /* 9776 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. 
Otherwise 9777 * just set it to NLGRPS_MAX if it's different from current process T1 9778 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9779 * replication and T1 new home is different from lgrp used for text 9780 * replication. When this happens asyncronous segvn thread rechecks if 9781 * segments should change lgrps used for text replication. If we fail 9782 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX 9783 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id 9784 * we want to use. We don't need to use cas in this case because 9785 * another thread that races in between our non atomic check and set 9786 * may only change p_tr_lgrpid to NLGRPS_MAX at this point. 9787 */ 9788 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9789 olid = p->p_tr_lgrpid; 9790 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9791 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9792 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != 9793 olid) { 9794 olid = p->p_tr_lgrpid; 9795 ASSERT(olid != LGRP_NONE); 9796 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9797 p->p_tr_lgrpid = NLGRPS_MAX; 9798 } 9799 } 9800 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9801 membar_producer(); 9802 /* 9803 * lgrp_move_thread() won't schedule async recheck after 9804 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9805 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9806 * is not LGRP_NONE. 9807 */ 9808 if (first && p->p_t1_lgrpid != LGRP_NONE && 9809 p->p_t1_lgrpid != lgrp_id) { 9810 first = 0; 9811 goto again; 9812 } 9813 } 9814 /* 9815 * If no amp was created yet for lgrp_id create a new one as long as 9816 * we have enough memory to afford it. 9817 */ 9818 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9819 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9820 if (trmem > segvn_textrepl_max_bytes) { 9821 SEGVN_TR_ADDSTAT(normem); 9822 goto fail; 9823 } 9824 if (anon_try_resv_zone(size, NULL) == 0) { 9825 SEGVN_TR_ADDSTAT(noanon); 9826 goto fail; 9827 } 9828 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9829 if (amp == NULL) { 9830 anon_unresv_zone(size, NULL); 9831 SEGVN_TR_ADDSTAT(nokmem); 9832 goto fail; 9833 } 9834 ASSERT(amp->refcnt == 1); 9835 amp->a_szc = szc; 9836 svntrp->tr_amp[lgrp_id] = amp; 9837 SEGVN_TR_ADDSTAT(newamp); 9838 } 9839 svntrp->tr_refcnt++; 9840 ASSERT(svd->svn_trnext == NULL); 9841 ASSERT(svd->svn_trprev == NULL); 9842 svd->svn_trnext = svntrp->tr_svnhead; 9843 svd->svn_trprev = NULL; 9844 if (svntrp->tr_svnhead != NULL) { 9845 svntrp->tr_svnhead->svn_trprev = svd; 9846 } 9847 svntrp->tr_svnhead = svd; 9848 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9849 ASSERT(amp->refcnt >= 1); 9850 svd->amp = amp; 9851 svd->anon_index = 0; 9852 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9853 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9854 svd->tr_state = SEGVN_TR_ON; 9855 mutex_exit(&svntr_hashtab[hash].tr_lock); 9856 SEGVN_TR_ADDSTAT(repl); 9857 return; 9858 fail: 9859 ASSERT(segvn_textrepl_bytes >= size); 9860 atomic_add_long(&segvn_textrepl_bytes, -size); 9861 ASSERT(svntrp != NULL); 9862 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9863 if (svntrp->tr_refcnt == 0) { 9864 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9865 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9866 mutex_exit(&svntr_hashtab[hash].tr_lock); 9867 kmem_cache_free(svntr_cache, svntrp); 9868 } else { 9869 mutex_exit(&svntr_hashtab[hash].tr_lock); 9870 } 9871 svd->tr_state = SEGVN_TR_OFF; 9872 } 9873 9874 
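/*
 * Editorial illustration (not part of the driver, kept under #if 0 so it is
 * never built): the p_tr_lgrpid update in segvn_textrepl() above publishes
 * the first lgroup observed for text replication with a single
 * compare-and-swap and collapses the field to the NLGRPS_MAX sentinel as
 * soon as two different lgroups are seen.  The standalone userland sketch
 * below reproduces only that decision logic, with C11 atomics standing in
 * for atomic_cas_32().  The names TR_NONE, TR_MANY and tr_note_lgrp() are
 * hypothetical and exist only for this example.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define	TR_NONE	(-1)	/* stands in for LGRP_NONE */
#define	TR_MANY	(64)	/* stands in for the NLGRPS_MAX sentinel */

static _Atomic int tr_lgrpid = TR_NONE;	/* stands in for p->p_tr_lgrpid */

/*
 * Record that a thread wants text replicated on 'lgrp'.  The first caller
 * publishes its lgroup; any later caller with a different lgroup collapses
 * the field to TR_MANY so an async recheck knows replicas may need to move.
 */
static void
tr_note_lgrp(int lgrp)
{
	int old = atomic_load(&tr_lgrpid);

	if (old == lgrp || old == TR_MANY)
		return;
	if (!atomic_compare_exchange_strong(&tr_lgrpid, &old,
	    (old == TR_NONE) ? lgrp : TR_MANY)) {
		/* CAS failed: 'old' now holds the current value. */
		if (old != lgrp && old != TR_MANY)
			atomic_store(&tr_lgrpid, TR_MANY);
	}
}

int
main(void)
{
	tr_note_lgrp(2);	/* first observer publishes lgroup 2 */
	tr_note_lgrp(2);	/* same lgroup: no change */
	tr_note_lgrp(5);	/* disagreement: collapse to TR_MANY */
	(void) printf("tr_lgrpid = %d\n", atomic_load(&tr_lgrpid));
	return (0);
}
#endif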
/*
 * Convert the seg back to a regular vnode mapping seg by unbinding it from
 * its text replication amp. This routine is most typically called when the
 * segment is unmapped, but it can also be called when the segment no longer
 * qualifies for text replication (e.g. due to protection changes). If
 * unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback(). If we are the last user of the svntr, free all of
 * its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        vnode_t *vp = svd->vp;
        u_offset_t off = svd->offset;
        size_t size = seg->s_size;
        u_offset_t eoff = off + size;
        uint_t szc = seg->s_szc;
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;
        svntr_t **prv_svntrp;
        lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
        lgrp_id_t i;

        ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
            SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->amp != NULL);
        ASSERT(svd->amp->refcnt >= 1);
        ASSERT(svd->anon_index == 0);
        ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
        ASSERT(svntr_hashtab != NULL);

        mutex_enter(&svntr_hashtab[hash].tr_lock);
        prv_svntrp = &svntr_hashtab[hash].tr_head;
        for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
                    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
                        break;
                }
        }
        if (svntrp == NULL) {
                panic("segvn_textunrepl: svntr record not found");
        }
        if (svntrp->tr_amp[lgrp_id] != svd->amp) {
                panic("segvn_textunrepl: amp mismatch");
        }
        svd->tr_state = SEGVN_TR_OFF;
        svd->amp = NULL;
        if (svd->svn_trprev == NULL) {
                ASSERT(svntrp->tr_svnhead == svd);
                svntrp->tr_svnhead = svd->svn_trnext;
                if (svntrp->tr_svnhead != NULL) {
                        svntrp->tr_svnhead->svn_trprev = NULL;
                }
                svd->svn_trnext = NULL;
        } else {
                svd->svn_trprev->svn_trnext = svd->svn_trnext;
                if (svd->svn_trnext != NULL) {
                        svd->svn_trnext->svn_trprev = svd->svn_trprev;
                        svd->svn_trnext = NULL;
                }
                svd->svn_trprev = NULL;
        }
        if (--svntrp->tr_refcnt) {
                mutex_exit(&svntr_hashtab[hash].tr_lock);
                goto done;
        }
        *prv_svntrp = svntrp->tr_next;
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        for (i = 0; i < NLGRPS_MAX; i++) {
                struct anon_map *amp = svntrp->tr_amp[i];
                if (amp == NULL) {
                        continue;
                }
                ASSERT(amp->refcnt == 1);
                ASSERT(amp->swresv == size);
                ASSERT(amp->size == size);
                ASSERT(amp->a_szc == szc);
                if (amp->a_szc != 0) {
                        anon_free_pages(amp->ahp, 0, size, szc);
                } else {
                        anon_free(amp->ahp, 0, size);
                }
                svntrp->tr_amp[i] = NULL;
                ASSERT(segvn_textrepl_bytes >= size);
                atomic_add_long(&segvn_textrepl_bytes, -size);
                anon_unresv_zone(amp->swresv, NULL);
                amp->refcnt = 0;
                anonmap_free(amp);
        }
        kmem_cache_free(svntr_cache, svntrp);
done:
        hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
            unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}
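
/*
 * Illustrative sketch (not compiled) of the pointer-to-pointer list walk
 * segvn_textunrepl() uses above to find and unlink an svntr record from its
 * hash chain without keeping a separate "previous" pointer. node_t,
 * matches() and headp are hypothetical stand-ins; the real code walks
 * svntr_hashtab[hash].tr_head while holding tr_lock.
 */
#if 0
typedef struct node {
        struct node     *next;
        /* key fields would go here */
} node_t;

static node_t *
list_find_unlink(node_t **headp, boolean_t (*matches)(node_t *))
{
        node_t **prvp;          /* points at the link field that references np */
        node_t *np;

        for (prvp = headp; (np = *prvp) != NULL; prvp = &np->next) {
                if (matches(np)) {
                        *prvp = np->next;       /* unlink in place */
                        np->next = NULL;
                        return (np);
                }
        }
        return (NULL);
}
#endif
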
/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (the VVMEXEC flag is set). In this
 * case we need to prevent further use of the existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;

        ASSERT(vp->v_flag & VVMEXEC);

        if (svntr_hashtab == NULL) {
                return;
        }

        mutex_enter(&svntr_hashtab[hash].tr_lock);
        svntrp = svntr_hashtab[hash].tr_head;
        for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp == vp && svntrp->tr_valid) {
                        svntrp->tr_valid = 0;
                }
        }
        mutex_exit(&svntr_hashtab[hash].tr_lock);
}

/*
 * Asynchronous text replication update thread. It waits on
 * segvn_trasync_sem and calls segvn_trupdate() every time the semaphore is
 * posted. The CALLB_CPR_* calls keep the thread CPR (suspend/resume) safe
 * while it is blocked.
 */
static void
segvn_trasync_thread(void)
{
        callb_cpr_t cpr_info;
        kmutex_t cpr_lock;      /* just for CPR stuff */

        mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

        CALLB_CPR_INIT(&cpr_info, &cpr_lock,
            callb_generic_cpr, "segvn_async");

        if (segvn_update_textrepl_interval == 0) {
                segvn_update_textrepl_interval = segvn_update_tr_time * hz;
        } else {
                segvn_update_textrepl_interval *= hz;
        }
        (void) timeout(segvn_trupdate_wakeup, NULL,
            segvn_update_textrepl_interval);

        for (;;) {
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_BEGIN(&cpr_info);
                mutex_exit(&cpr_lock);
                sema_p(&segvn_trasync_sem);
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
                mutex_exit(&cpr_lock);
                segvn_trupdate();
        }
}

static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

/*
 * timeout() callback that wakes up the async thread when the count of text
 * replication thread migrations has changed since the last check, then
 * reschedules itself.
 */
static void
segvn_trupdate_wakeup(void *dummy)
{
        uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

        if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
                segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
                sema_v(&segvn_trasync_sem);
        }

        if (!segvn_disable_textrepl_update &&
            segvn_update_textrepl_interval != 0) {
                (void) timeout(segvn_trupdate_wakeup, dummy,
                    segvn_update_textrepl_interval);
        }
}

/*
 * Walk the svntr hash table and recheck every segment that currently uses
 * text replication.
 */
static void
segvn_trupdate(void)
{
        ulong_t hash;
        svntr_t *svntrp;
        segvn_data_t *svd;

        ASSERT(svntr_hashtab != NULL);

        for (hash = 0; hash < svntr_hashtab_sz; hash++) {
                mutex_enter(&svntr_hashtab[hash].tr_lock);
                svntrp = svntr_hashtab[hash].tr_head;
                for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                        ASSERT(svntrp->tr_refcnt != 0);
                        svd = svntrp->tr_svnhead;
                        for (; svd != NULL; svd = svd->svn_trnext) {
                                segvn_trupdate_seg(svd->seg, svd, svntrp,
                                    hash);
                        }
                }
                mutex_exit(&svntr_hashtab[hash].tr_lock);
        }
}
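
/*
 * Illustrative sketch (not compiled) of the wakeup/worker split used above:
 * a self-rescheduling timeout() callback posts a semaphore only when there
 * is work to do, and a CPR-safe worker thread blocks on that semaphore and
 * performs the work. The my_* names, work_pending() and do_work() are
 * hypothetical; ksema_t, sema_p()/sema_v(), timeout() and the CALLB_CPR_*
 * macros are the primitives used by the code above.
 */
#if 0
static ksema_t my_sem;
static clock_t my_interval;             /* rearm interval, in ticks */

static void
my_wakeup(void *arg)
{
        if (work_pending())
                sema_v(&my_sem);
        (void) timeout(my_wakeup, arg, my_interval);    /* re-arm */
}

static void
my_worker_thread(void)
{
        callb_cpr_t cpr_info;
        kmutex_t cpr_lock;

        mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
        CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "my_worker");

        (void) timeout(my_wakeup, NULL, my_interval);

        for (;;) {
                /* Mark the thread CPR-safe while it is blocked. */
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_BEGIN(&cpr_info);
                mutex_exit(&cpr_lock);

                sema_p(&my_sem);

                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
                mutex_exit(&cpr_lock);

                do_work();
        }
}
#endif
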
/*
 * Recheck one text replicated segment. Called from segvn_trupdate() with
 * the svntr bucket lock held. If the process T1 home lgrp has changed,
 * switch the segment to the replica for the new lgrp (creating one if
 * needed) and unload the old translations so subsequent faults use the new
 * amp.
 */
static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
        proc_t *p;
        lgrp_id_t lgrp_id;
        struct as *as;
        size_t size;
        struct anon_map *amp;

        ASSERT(svd->vp != NULL);
        ASSERT(svd->vp == svntrp->tr_vp);
        ASSERT(svd->offset == svntrp->tr_off);
        ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
        ASSERT(seg != NULL);
        ASSERT(svd->seg == seg);
        ASSERT(seg->s_data == (void *)svd);
        ASSERT(seg->s_szc == svntrp->tr_szc);
        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->amp != NULL);
        ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
        ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
        ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
        ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
        ASSERT(svntrp->tr_refcnt != 0);
        ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

        as = seg->s_as;
        ASSERT(as != NULL && as != &kas);
        p = as->a_proc;
        ASSERT(p != NULL);
        ASSERT(p->p_tr_lgrpid != LGRP_NONE);
        lgrp_id = p->p_t1_lgrpid;
        if (lgrp_id == LGRP_NONE) {
                return;
        }
        ASSERT(lgrp_id < NLGRPS_MAX);
        if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
                return;
        }

        /*
         * Use tryenter locking since we are taking the as/seg locks and the
         * svntr hash lock in the reverse order from the synchronous thread
         * order.
         */
        if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
                SEGVN_TR_ADDSTAT(nolock);
                if (segvn_lgrp_trthr_migrs_snpsht) {
                        segvn_lgrp_trthr_migrs_snpsht = 0;
                }
                return;
        }
        if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
                AS_LOCK_EXIT(as, &as->a_lock);
                SEGVN_TR_ADDSTAT(nolock);
                if (segvn_lgrp_trthr_migrs_snpsht) {
                        segvn_lgrp_trthr_migrs_snpsht = 0;
                }
                return;
        }
        size = seg->s_size;
        if (svntrp->tr_amp[lgrp_id] == NULL) {
                size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
                if (trmem > segvn_textrepl_max_bytes) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as, &as->a_lock);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        SEGVN_TR_ADDSTAT(normem);
                        return;
                }
                if (anon_try_resv_zone(size, NULL) == 0) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as, &as->a_lock);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        SEGVN_TR_ADDSTAT(noanon);
                        return;
                }
                amp = anonmap_alloc(size, size, KM_NOSLEEP);
                if (amp == NULL) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as, &as->a_lock);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        anon_unresv_zone(size, NULL);
                        SEGVN_TR_ADDSTAT(nokmem);
                        return;
                }
                ASSERT(amp->refcnt == 1);
                amp->a_szc = seg->s_szc;
                svntrp->tr_amp[lgrp_id] = amp;
        }
        /*
         * We don't need to drop the bucket lock, but here we give other
         * threads a chance. The svntr and svd can't be unlinked as long as
         * the segment lock is held as a writer and the AS lock is held as
         * well. After we retake the bucket lock we'll continue from where we
         * left off. We'll be able to reach the end of either list since new
         * entries are always added to the beginning of the lists.
         */
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
        mutex_enter(&svntr_hashtab[hash].tr_lock);

        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(svd->amp != NULL);
        ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
        ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
        ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

        svd->tr_policy_info.mem_lgrpid = lgrp_id;
        svd->amp = svntrp->tr_amp[lgrp_id];
        p->p_tr_lgrpid = NLGRPS_MAX;
        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
        AS_LOCK_EXIT(as, &as->a_lock);

        ASSERT(svntrp->tr_refcnt != 0);
        ASSERT(svd->vp == svntrp->tr_vp);
        ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
        ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
        ASSERT(svd->seg == seg);
        ASSERT(svd->tr_state == SEGVN_TR_ON);

        SEGVN_TR_ADDSTAT(asyncrepl);
}
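
/*
 * Illustrative sketch (not compiled) of the trylock-and-back-off pattern
 * segvn_trupdate_seg() relies on above. The synchronous path takes the as
 * and segment locks before the svntr bucket lock; the async thread already
 * holds the bucket lock, so it may only try-enter the other locks and must
 * back off on failure rather than block, or it could deadlock against a
 * thread that holds those locks and is waiting for the bucket lock. The
 * function and parameter names here are hypothetical.
 */
#if 0
static boolean_t
reverse_order_update(kmutex_t *bucket_lock, krwlock_t *as_lock,
    krwlock_t *seg_lock)
{
        ASSERT(mutex_owned(bucket_lock));

        /* Normal order is as_lock -> seg_lock -> bucket_lock. */
        if (!rw_tryenter(as_lock, RW_READER))
                return (B_FALSE);       /* back off; retry on a later pass */
        if (!rw_tryenter(seg_lock, RW_WRITER)) {
                rw_exit(as_lock);
                return (B_FALSE);
        }

        /* The update would be performed here with all three locks held. */

        rw_exit(seg_lock);
        rw_exit(as_lock);
        return (B_TRUE);
}
#endif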