/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem.  Also this lock protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t	new_freemem_lock;
static uint_t	freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;
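
/*
 * Illustrative sketch (not part of the interface): a path that locks user
 * pages would adjust the accounting globals above roughly as follows,
 * always under freemem_lock.  The variable npages is hypothetical; the
 * real updates are made inline by the segment and page locking code.
 *
 *	mutex_enter(&freemem_lock);
 *	availrmem -= npages;		(locked pages can't be reclaimed)
 *	pages_locked += npages;		(user locking via mlock/plock)
 *	mutex_exit(&freemem_lock);
 */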
/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT		NCPU
static uint_t	pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t	pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to).  This is done to prevent a drain condition
 * from happening.  This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B.  An
 * example of this shows up with device interrupts.  The dma buffer is
 * allocated by the cpu requesting the IO thus the pcf_count is decremented
 * based on that.  When the memory is returned by the interrupt thread, the
 * pcf_count will be incremented based on the cpu servicing the interrupt.
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))

static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
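
/*
 * Illustrative sketch of the per-bucket protocol (a simplified view of
 * the pattern the pcf_decrement_bucket() helper declared above
 * implements).  The starting bucket is chosen with PCF_INDEX() so that
 * different CPUs tend to contend on different locks:
 *
 *	struct pcf *p = &pcf[PCF_INDEX()];
 *	mutex_enter(&p->pcf_lock);
 *	if (p->pcf_count >= npages) {
 *		p->pcf_count -= npages;		(take pages from this pool)
 *		mutex_exit(&p->pcf_lock);
 *	} else {
 *		mutex_exit(&p->pcf_lock);
 *		(advance to the next bucket, wrapping via pcf_fanout_mask)
 *	}
 */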

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

#endif

static inline page_t *
page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
{
	uint_t mylen = 0;
	page_t *page;

	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
		if (page->p_vnode == vnode && page->p_offset == off)
			break;

#ifdef VM_STATS
	if (page != NULL)
		pagecnt.pc_find_hit++;
	else
		pagecnt.pc_find_miss++;

	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
#endif

	return (page);
}
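
/*
 * page_hash_search() takes no locks itself; callers that need a stable
 * answer bracket it with the hash chain mutex, following the pattern
 * page_find() uses later in this file:
 *
 *	index = PAGE_HASH_FUNC(vp, off);
 *	phm = PAGE_HASH_MUTEX(index);
 *	mutex_enter(phm);
 *	pp = page_hash_search(index, vp, off);
 *	mutex_exit(phm);
 */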

#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_inc_32(&memseg_stats.v)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
 * for large page allocation requests.  If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;

static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);


void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}
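
/*
 * Worked example of the sizing above (illustrative numbers): with
 * boot_ncpus == 6, pcf_fanout starts at 6, which is not a power of two,
 * so 1 << highbit(6) rounds it up to 8; pcf_fanout_mask then becomes 7
 * and PCF_INDEX() selects one of 8 pcf buckets.
 */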

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first time through, the setting of pages_pp_maximum
		 * is based on the value of availrmem just after the
		 * start-up allocations.  To preserve this relationship at
		 * run time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'.  If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
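	/*
	 * Illustrative arithmetic (assuming 4K pages): with about 8GB of
	 * initially available memory, (availrmem_initial - avrmem_delta)
	 * is roughly 2M pages; dividing by 25 yields 4% (~84K pages) and
	 * btop(4 * 1024 * 1024) adds 1024 more, so roughly 330MB would be
	 * kept safe from user page locking.
	 */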
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}

void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = target_total_pages / 2;
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}
/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * pnum	- page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (i.e. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Arbitrarily limit the max page_get request
	 * to 1/2 of the page structs we have.
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated.  Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list.
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned. We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page.  If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.
 * A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL.
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list.  Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm.  Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * Just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it cannot change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The page_hash_search
		 * function will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation.  If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search).  If it is on the
		 * cachelist it must be pulled now. To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on.  If page_trylock() was used
		 * above, then we have to do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * Page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail.  If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules.
		 * Panic now and
		 * get it over with.  As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}
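/*
 * Typical use of the lookup interface above (illustrative only; vp and
 * off stand for whatever vnode and offset the caller is working with):
 *
 *	page_t *pp = page_lookup(vp, off, SE_SHARED);
 *	if (pp == NULL) {
 *		(no page; a caller would typically page_create and read)
 *	} else {
 *		(pp is locked SE_SHARED, off the free list, data valid)
 *		page_unlock(pp);
 *	}
 */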
/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	pp = page_hash_search(index, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		pp = page_hash_search(index, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked.  This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t *pp;
	kmutex_t *phm;
	ulong_t index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system.  Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine.  Subsequently, we don't
 * even bother to lock the list.
 */
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
	ulong_t index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_exists_cnt);

	index = PAGE_HASH_FUNC(vp, off);

	return (page_hash_search(index, vp, off));
}
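/*
 * Illustrative use of page_exists_physcontig() below.  Per its contract,
 * the caller pre-NULLs ppa[0] so that a return of 1 with ppa[0] still
 * NULL can be recognized as the "exists but couldn't be locked" hint
 * (the ppa array is schematic here; it must hold pages + 1 entries):
 *
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa) && ppa[0] != NULL) {
 *		(ppa[0 .. pages - 1] are locked SE_SHARED and
 *		ppa[pages] == NULL terminates the array)
 *	}
 */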
/*
 * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
 * page_size(szc)) range.  If they exist and ppa is not NULL, fill the ppa
 * array with these pages locked SHARED.  If necessary reclaim pages from
 * the freelist.  Return 1 if contiguous pages exist and 0 otherwise.
 *
 * If we fail to lock the pages, still return 1 if the pages exist and are
 * contiguous.  But in this case the return value is just a hint; the ppa
 * array won't be filled.  The caller should initialize ppa[0] as NULL to
 * distinguish the return values.
 *
 * Returns 0 if the pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	u_offset_t save_off = off;
	ulong_t index;
	kmutex_t *phm;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(!IS_SWAPFSVP(vp));
	ASSERT(!VN_ISKAS(vp));

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		/*
		 * Also check whether p_pagenum was modified by DR.
		 */
		if (pp->p_szc != pszc || pp->p_vnode != vp ||
		    pp->p_offset != off || pp->p_pagenum != pfn) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * szc was non zero and the vnode and offset matched after
		 * we locked the page; it means it can't become free on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc is the same as for the previously locked
			 * pages with the right identity.  Since this page
			 * had the correct szc after we locked it, it can't
			 * get freed or destroyed and therefore must have
			 * the expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != vp ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We make 4 passes across the pages to promote the page size.
	 * We're extra cautious to promote the page size atomically with
	 * respect to everybody else.  But we can probably optimize this
	 * into 1 pass if it becomes an issue.
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		/*
		 * Check whether p_pagenum was modified by DR.
		 */
		if (pp->p_pagenum != pfn) {
			page_unlock(pp);
			break;
		}
		if (pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim() failed because we were out of memory.
		 * Drop the rest of the locks and return because this page
		 * must already have been reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		ASSERT(pp->p_vnode == vp);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vp->v_pages != NULL);
	return (1);
}
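/*
 * Worked example of the alignment requirement above (illustrative only,
 * assuming a hypothetical geometry of 8K base pages and a 64K large
 * page, i.e. pages = 8): IS_P2ALIGNED(pfn, pages) demands pfn % 8 == 0,
 * so a root page at pfn 4096 can anchor the large page while one at
 * pfn 4100 cannot, even if 8 contiguous pages happen to start there.
 */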
/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code.  Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
{
	page_t *pp;
	kmutex_t *phm;
	ulong_t index;
	int rc = 0;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	mutex_exit(phm);
	return (rc);
}

/* wake up threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system.  In very few places is a really accurate 'freemem'
 * needed.  To avoid contention on the lock protecting a single freemem,
 * it was spread out into NCPU buckets.  set_freemem() sets freemem to
 * the total of all NCPU buckets.  It is called from clock() on each TICK.
 */
void
set_freemem()
{
	struct pcf *p;
	ulong_t t;
	uint_t i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing the mutex.  It's not that
	 * critical if we miss a tick or two.  This is where
	 * we wake up possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf *p;
	ulong_t t;
	uint_t i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}
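/*
 * Illustrative model of the fanout (numbers are hypothetical): with a
 * pcf_fanout of 4 and per-bucket counts {100, 40, 0, 60}, the loops in
 * set_freemem()/get_freemem() compute freemem = 200.  Allocators that
 * decrement a single bucket contend on only one of the 4 pcf_locks,
 * while readers of 'freemem' see a value that is at most a tick stale.
 */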
/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf *p;
	uint_t i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf *p;
	uint_t i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}
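/*
 * Concrete sketch of the symmetric usage above (hypothetical caller;
 * delay(9F) is just one way to "wait a bit"):
 *
 *	page_needfree(100);
 *	delay(hz);
 *	page_needfree(-100);
 *
 * The second call must use the same magnitude as the first, since
 * needfree is a plain signed accumulator under new_freemem_lock.
 */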
/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree.  We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist.  However, we can block all PG_WAIT
 * allocations until memory becomes available.  The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t fm;
	uint_t i;
	pgcnt_t tf;	/* effective value of throttlefree */

	/*
	 * Normal priority allocations.
	 */
	if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
		ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
		return (freemem >= npages + throttlefree);
	}

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and is pageout dispensated
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve.  In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	cv_signal(&proc_pageout->p_cv);

	for (;;) {
		fm = 0;
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		for (i = 0; i < pcf_fanout; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		if (freemem >= npages + tf) {
			mutex_exit(&new_freemem_lock);
			break;
		}
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}
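/*
 * Summary of the decisions above (added commentary; tf is throttlefree,
 * minus pageout_reserve when PG_PUSHPAGE is set):
 *
 *	NOMEMWAIT() thread		never denied
 *	PG_PANIC, no PG_WAIT		never denied
 *	PG_PUSHPAGE, no PG_WAIT		never denied
 *	PG_NORMALPRI, no PG_WAIT	denied unless freemem >= npages + throttlefree
 *	no PG_WAIT otherwise		denied unless freemem >= npages + pageout_reserve
 *	PG_WAIT				sleeps until freemem >= npages + tf, never denied
 */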
/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(pgcnt_t npages, uint_t flags)
{
	pgcnt_t total;
	uint_t i;
	struct pcf *p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:
	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages)
		(void) kcage_create_throttle(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	if (pcf_decrement_bucket(npages) ||
	    pcf_decrement_multiple(&total, npages, 0))
		return (1);

	/*
	 * All of the pcf locks are held, there are not enough pages
	 * to satisfy the request (total < npages).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks.  This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
		    "page_create_nomem:npages %ld freemem %ld", npages, freemem);
		return (0);
	}

	ASSERT(proc_pageout != NULL);
	cv_signal(&proc_pageout->p_cv);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
	    "page_create_sleep_start: freemem %ld needfree %ld",
	    freemem, needfree);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
	    "page_create_sleep_end: freemem %ld needfree %ld",
	    freemem, needfree);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}

/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf *p;
	pgcnt_t lump;
	uint_t *which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so let's spread
	 * the wealth around.
	 */
	lump = roundup(npages, pcf_fanout) / pcf_fanout;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting.  Another bucket
			 * may have woken it up by now.  If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}
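/*
 * Worked example (hypothetical fanout): with pcf_fanout == 4,
 * page_create_putback(10) computes lump = roundup(10, 4) / 4 = 3 and
 * deposits 3, 3, 3 and finally 1 into successive buckets, so no single
 * bucket absorbs the whole lump.
 */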
/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters.  Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int i;
	struct pcf *p;

	/* Update freemem while we're here. */
	freemem = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static page_t *
page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t count;
	page_t *pp;
	uint_t locked, i;
	struct pcf *p;
	lgrp_t *lgrp;
	int cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & PG_NORELOC) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up.  Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists.  This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stage.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists.  This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list.  This
	 * could happen over and over.  We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not.  When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool.  This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked.  Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page.  This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__i386) || defined(__amd64)
	flags = page_create_update_flags_x86(flags);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
		    flags, lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(vp, off, seg, vaddr,
			    flags, lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize.  Don't fight with other pcgs().
			 */
			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < pcf_fanout; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with.  The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * we go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
	/*NOTREACHED*/
}

/*
 * Create enough pages for "bytes" worth of data starting at
 * "off" in "vp".
 *
 *	Where flag must be one of:
 *
 *		PG_EXCL:	Exclusive create (fail if any page already
 *				exists in the page cache) which does not
 *				wait for memory to become available.
 *
 *		PG_WAIT:	Non-exclusive create which can wait for
 *				memory to become available.
 *
 *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
 *				(Not Supported)
 *
 * A doubly linked list of pages is returned to the caller.  Each page
 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
 * lock.
 *
 * Unable to change the parameters to page_create() in a minor release,
 * we renamed page_create() to page_create_va(), changed all known calls
 * from page_create() to page_create_va(), and created this wrapper.
 *
 * Upon a major release, we should break compatibility by deleting this
 * wrapper, and replacing all the strings "page_create_va", with "page_create".
 *
 * NOTE: There is a copy of this interface as page_create_io() in
 *	 i86/vm/vm_machdep.c.  Any bugs fixed here should be applied
 *	 there.
 */
page_t *
page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
{
	caddr_t random_vaddr;
	struct seg kseg;

#ifdef DEBUG
	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
	    (void *)caller());
#endif

	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
	    (uintptr_t)(off >> PAGESHIFT));
	kseg.s_as = &kas;

	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
}

#ifdef DEBUG
uint32_t pg_alloc_pgs_mtbf = 0;
#endif
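/*
 * Example of the preferred interface (hypothetical caller): new code
 * should call page_create_va() directly and supply a meaningful mapping
 * address for color selection, e.g.:
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
 *	    seg, addr);
 *
 * rather than use this wrapper, which must synthesize a pseudo-random
 * vaddr from [vp, off] and warns on DEBUG kernels.
 */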
/*
 * Used for large page support.  It will attempt to allocate
 * large page(s) off the freelist.
 *
 * Returns nonzero on failure.
 */
int
page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
    page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags)
{
	pgcnt_t npgs, curnpgs, totpgs;
	size_t pgsz;
	page_t *pplist = NULL, *pp;
	int err = 0;
	lgrp_t *lgrp;

	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
	ASSERT(pgflags == 0 || pgflags == PG_LOCAL);

	/*
	 * Check if system heavily prefers local large pages over remote
	 * on systems with multiple lgroups.
	 */
	if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) {
		pgflags = PG_LOCAL;
	}

	VM_STAT_ADD(alloc_pages[0]);

#ifdef DEBUG
	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
		return (ENOMEM);
	}
#endif

	/*
	 * Exactly one of basepp and ppa must be NULL; the other must
	 * be non-NULL.
	 */
	ASSERT(basepp != NULL || ppa != NULL);
	ASSERT(basepp == NULL || ppa == NULL);

#if defined(__i386) || defined(__amd64)
	while (page_chk_freelist(szc) == 0) {
		VM_STAT_ADD(alloc_pages[8]);
		if (anypgsz == 0 || --szc == 0)
			return (ENOMEM);
	}
#endif

	pgsz = page_get_pagesize(szc);
	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;

	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);

	(void) page_create_wait(npgs, PG_WAIT);

	while (npgs && szc) {
		lgrp = lgrp_mem_choose(seg, addr, pgsz);
		if (pgflags == PG_LOCAL) {
			pp = page_get_freelist(vp, 0, seg, addr, pgsz,
			    pgflags, lgrp);
			if (pp == NULL) {
				pp = page_get_freelist(vp, 0, seg, addr, pgsz,
				    0, lgrp);
			}
		} else {
			pp = page_get_freelist(vp, 0, seg, addr, pgsz,
			    0, lgrp);
		}
		if (pp != NULL) {
			VM_STAT_ADD(alloc_pages[1]);
			page_list_concat(&pplist, &pp);
			ASSERT(npgs >= curnpgs);
			npgs -= curnpgs;
		} else if (anypgsz) {
			VM_STAT_ADD(alloc_pages[2]);
			szc--;
			pgsz = page_get_pagesize(szc);
			curnpgs = pgsz >> PAGESHIFT;
		} else {
			VM_STAT_ADD(alloc_pages[3]);
			ASSERT(npgs == totpgs);
			page_create_putback(npgs);
			return (ENOMEM);
		}
	}
	if (szc == 0) {
		VM_STAT_ADD(alloc_pages[4]);
		ASSERT(npgs != 0);
		page_create_putback(npgs);
		err = ENOMEM;
	} else if (basepp != NULL) {
		ASSERT(npgs == 0);
		ASSERT(ppa == NULL);
		*basepp = pplist;
	}

	npgs = totpgs - npgs;
	pp = pplist;

	/*
	 * Clear the free and age bits.  Also, if we were passed in a ppa,
	 * fill it in with all the constituent pages from the large page.
	 * But if we failed to allocate all the pages, just free what we got.
	 */
	while (npgs != 0) {
		ASSERT(PP_ISFREE(pp));
		ASSERT(PP_ISAGED(pp));
		if (ppa != NULL || err != 0) {
			if (err == 0) {
				VM_STAT_ADD(alloc_pages[5]);
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_sub(&pplist, pp);
				*ppa++ = pp;
				npgs--;
			} else {
				VM_STAT_ADD(alloc_pages[6]);
				ASSERT(pp->p_szc != 0);
				curnpgs = page_get_pagecnt(pp->p_szc);
				page_list_break(&pp, &pplist, curnpgs);
				page_list_add_pages(pp, 0);
				page_create_putback(curnpgs);
				ASSERT(npgs >= curnpgs);
				npgs -= curnpgs;
			}
			pp = pplist;
		} else {
			VM_STAT_ADD(alloc_pages[7]);
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			pp = pp->p_next;
			npgs--;
		}
	}
	return (err);
}
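/*
 * Illustrative walk-through (hypothetical szc geometry): suppose szc 2
 * spans 64 base pages and szc 1 spans 16.  A szc-2 request first
 * reserves 64 pages via page_create_wait().  If the freelist can only
 * supply szc-1 chunks and anypgsz is set, the allocation loop above
 * retries at szc 1 and returns four 16-page large pages on pplist;
 * with anypgsz clear it would put all 64 pages back and fail with
 * ENOMEM.
 */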
/*
 * Get a single large page off the freelists, and set it up for use.
 * Number of bytes requested must be a supported page size.
 *
 * Note that this call may fail even if there is sufficient
 * memory available or PG_WAIT is set, so the caller must
 * be willing to fall back on page_create_va(), block and retry,
 * or fail the requester.
 */
page_t *
page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
    struct seg *seg, caddr_t vaddr, void *arg)
{
	pgcnt_t npages;
	page_t *pp;
	page_t *rootpp;
	lgrp_t *lgrp;
	lgrp_id_t *lgrpid = (lgrp_id_t *)arg;

	ASSERT(vp != NULL);

	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
	/* but no others */

	ASSERT((flags & PG_EXCL) == PG_EXCL);

	npages = btop(bytes);

	if (!kcage_on || panicstr) {
		/*
		 * Cage is OFF, or we are single threaded in
		 * panic, so make everything a RELOC request.
		 */
		flags &= ~PG_NORELOC;
	}

	/*
	 * Make sure there's adequate physical memory available.
	 * Note: PG_WAIT is ignored here.
	 */
	if (freemem <= throttlefree + npages) {
		VM_STAT_ADD(page_create_large_cnt[1]);
		return (NULL);
	}

	/*
	 * If cage is on, dampen draw from cage when available
	 * cage space is low.
	 */
	if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) &&
	    kcage_freemem < kcage_throttlefree + npages) {

		/*
		 * The cage is on, the caller wants PG_NORELOC
		 * pages and available cage memory is very low.
		 * Call kcage_create_throttle() to attempt to
		 * control demand on the cage.
		 */
		if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
			VM_STAT_ADD(page_create_large_cnt[2]);
			return (NULL);
		}
	}

	if (!pcf_decrement_bucket(npages) &&
	    !pcf_decrement_multiple(NULL, npages, 1)) {
		VM_STAT_ADD(page_create_large_cnt[4]);
		return (NULL);
	}

	/*
	 * This is where this function behaves fundamentally differently
	 * from page_create_va(); since we're intending to map the page
	 * with a single TTE, we have to get it as a physically contiguous
	 * hardware pagesize chunk.  If we can't, we fail.
	 */
	if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
	    LGRP_EXISTS(lgrp_table[*lgrpid]))
		lgrp = lgrp_table[*lgrpid];
	else
		lgrp = lgrp_mem_choose(seg, vaddr, bytes);

	if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr,
	    bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
		page_create_putback(npages);
		VM_STAT_ADD(page_create_large_cnt[5]);
		return (NULL);
	}

	/*
	 * If we got the page with the wrong mtype, give it back; this is a
	 * workaround for CR 6249718.
When CR 6249718 is fixed, we will never get 22107c478bd9Sstevel@tonic-gate * inside the "if" and the workaround becomes just a nop. 22117c478bd9Sstevel@tonic-gate */ 22127c478bd9Sstevel@tonic-gate if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 22137c478bd9Sstevel@tonic-gate page_list_add_pages(rootpp, 0); 22147c478bd9Sstevel@tonic-gate page_create_putback(npages); 22157c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[6]); 22167c478bd9Sstevel@tonic-gate return (NULL); 22177c478bd9Sstevel@tonic-gate } 22187c478bd9Sstevel@tonic-gate 22197c478bd9Sstevel@tonic-gate /* 22207c478bd9Sstevel@tonic-gate * If satisfying this request has left us with too little 22217c478bd9Sstevel@tonic-gate * memory, start the wheels turning to get some back. The 22227c478bd9Sstevel@tonic-gate * first clause of the test prevents waking up the pageout 22237c478bd9Sstevel@tonic-gate * daemon in situations where it would decide that there's 22247c478bd9Sstevel@tonic-gate * nothing to do. 22257c478bd9Sstevel@tonic-gate */ 22267c478bd9Sstevel@tonic-gate if (nscan < desscan && freemem < minfree) { 22277c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 22287c478bd9Sstevel@tonic-gate "pageout_cv_signal:freemem %ld", freemem); 22297c478bd9Sstevel@tonic-gate cv_signal(&proc_pageout->p_cv); 22307c478bd9Sstevel@tonic-gate } 22317c478bd9Sstevel@tonic-gate 22327c478bd9Sstevel@tonic-gate pp = rootpp; 22337c478bd9Sstevel@tonic-gate while (npages--) { 22347c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 22357c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 22367c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp)); 22377c478bd9Sstevel@tonic-gate PP_CLRFREE(pp); 22387c478bd9Sstevel@tonic-gate PP_CLRAGED(pp); 22397c478bd9Sstevel@tonic-gate if (!page_hashin(pp, vp, off, NULL)) 22407c478bd9Sstevel@tonic-gate panic("page_create_large: hashin failed: page %p", 22417c478bd9Sstevel@tonic-gate (void *)pp); 22427c478bd9Sstevel@tonic-gate page_io_lock(pp); 22437c478bd9Sstevel@tonic-gate off += PAGESIZE; 22447c478bd9Sstevel@tonic-gate pp = pp->p_next; 22457c478bd9Sstevel@tonic-gate } 22467c478bd9Sstevel@tonic-gate 22477c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[0]); 22487c478bd9Sstevel@tonic-gate return (rootpp); 22497c478bd9Sstevel@tonic-gate } 22507c478bd9Sstevel@tonic-gate 22517c478bd9Sstevel@tonic-gate page_t * 22527c478bd9Sstevel@tonic-gate page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 22537c478bd9Sstevel@tonic-gate struct seg *seg, caddr_t vaddr) 22547c478bd9Sstevel@tonic-gate { 22557c478bd9Sstevel@tonic-gate page_t *plist = NULL; 22567c478bd9Sstevel@tonic-gate pgcnt_t npages; 22577c478bd9Sstevel@tonic-gate pgcnt_t found_on_free = 0; 22587c478bd9Sstevel@tonic-gate pgcnt_t pages_req; 22597c478bd9Sstevel@tonic-gate page_t *npp = NULL; 22607c478bd9Sstevel@tonic-gate struct pcf *p; 22617c478bd9Sstevel@tonic-gate lgrp_t *lgrp; 22627c478bd9Sstevel@tonic-gate 22637c478bd9Sstevel@tonic-gate TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 22647c478bd9Sstevel@tonic-gate "page_create_start:vp %p off %llx bytes %lu flags %x", 22657c478bd9Sstevel@tonic-gate vp, off, bytes, flags); 22667c478bd9Sstevel@tonic-gate 22677c478bd9Sstevel@tonic-gate ASSERT(bytes != 0 && vp != NULL); 22687c478bd9Sstevel@tonic-gate 22697c478bd9Sstevel@tonic-gate if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 22707c478bd9Sstevel@tonic-gate panic("page_create: invalid flags"); 22717c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 22727c478bd9Sstevel@tonic-gate } 22737c478bd9Sstevel@tonic-gate
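	/*
	 * Callers that need a physically contiguous chunk typically try
	 * page_create_va_large() above and fall back to this routine
	 * when it fails.  A minimal sketch of that pattern (the caller
	 * shown here is hypothetical, not from this file; len must be
	 * a supported large page size):
	 *
	 *	pp = page_create_va_large(vp, off, len, PG_EXCL,
	 *	    seg, addr, NULL);
	 *	if (pp == NULL)
	 *		pp = page_create_va(vp, off, len,
	 *		    PG_EXCL | PG_WAIT, seg, addr);
	 */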
ASSERT((flags & ~(PG_EXCL | PG_WAIT | 227423a80de1SStan Studzinski PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 22757c478bd9Sstevel@tonic-gate /* but no others */ 22767c478bd9Sstevel@tonic-gate 22777c478bd9Sstevel@tonic-gate pages_req = npages = btopr(bytes); 22787c478bd9Sstevel@tonic-gate /* 22797c478bd9Sstevel@tonic-gate * Try to see whether request is too large to *ever* be 22807c478bd9Sstevel@tonic-gate * satisfied, in order to prevent deadlock. We arbitrarily 22817c478bd9Sstevel@tonic-gate * decide to limit maximum size requests to max_page_get. 22827c478bd9Sstevel@tonic-gate */ 22837c478bd9Sstevel@tonic-gate if (npages >= max_page_get) { 22847c478bd9Sstevel@tonic-gate if ((flags & PG_WAIT) == 0) { 22857c478bd9Sstevel@tonic-gate TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 22867c478bd9Sstevel@tonic-gate "page_create_toobig:vp %p off %llx npages " 22877c478bd9Sstevel@tonic-gate "%lu max_page_get %lu", 22887c478bd9Sstevel@tonic-gate vp, off, npages, max_page_get); 22897c478bd9Sstevel@tonic-gate return (NULL); 22907c478bd9Sstevel@tonic-gate } else { 22917c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, 22927c478bd9Sstevel@tonic-gate "Request for too much kernel memory " 22937c478bd9Sstevel@tonic-gate "(%lu bytes), will hang forever", bytes); 22947c478bd9Sstevel@tonic-gate for (;;) 22957c478bd9Sstevel@tonic-gate delay(1000000000); 22967c478bd9Sstevel@tonic-gate } 22977c478bd9Sstevel@tonic-gate } 22987c478bd9Sstevel@tonic-gate 22997c478bd9Sstevel@tonic-gate if (!kcage_on || panicstr) { 23007c478bd9Sstevel@tonic-gate /* 2301d94ffb28Sjmcp * Cage is OFF, or we are single threaded in 2302d94ffb28Sjmcp * panic, so make everything a RELOC request. 23037c478bd9Sstevel@tonic-gate */ 23047c478bd9Sstevel@tonic-gate flags &= ~PG_NORELOC; 23057c478bd9Sstevel@tonic-gate } 23067c478bd9Sstevel@tonic-gate 2307d94ffb28Sjmcp if (freemem <= throttlefree + npages) 2308d94ffb28Sjmcp if (!page_create_throttle(npages, flags)) 23097c478bd9Sstevel@tonic-gate return (NULL); 23107c478bd9Sstevel@tonic-gate 23117c478bd9Sstevel@tonic-gate /* 2312d94ffb28Sjmcp * If cage is on, dampen draw from cage when available 2313d94ffb28Sjmcp * cage space is low. 23147c478bd9Sstevel@tonic-gate */ 2315d94ffb28Sjmcp if ((flags & PG_NORELOC) && 2316d94ffb28Sjmcp kcage_freemem < kcage_throttlefree + npages) { 23177c478bd9Sstevel@tonic-gate 2318d94ffb28Sjmcp /* 2319d94ffb28Sjmcp * The cage is on, the caller wants PG_NORELOC 2320d94ffb28Sjmcp * pages and available cage memory is very low. 2321d94ffb28Sjmcp * Call kcage_create_throttle() to attempt to 2322d94ffb28Sjmcp * control demand on the cage. 2323d94ffb28Sjmcp */ 2324d94ffb28Sjmcp if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 23257c478bd9Sstevel@tonic-gate return (NULL); 23267c478bd9Sstevel@tonic-gate } 23277c478bd9Sstevel@tonic-gate 23287c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[0]); 23297c478bd9Sstevel@tonic-gate 233006fb6a36Sdv142724 if (!pcf_decrement_bucket(npages)) { 23317c478bd9Sstevel@tonic-gate /* 23327c478bd9Sstevel@tonic-gate * Have to look harder. If npages is greater than 2333da6c28aaSamw * one, then we might have to coalesce the counters. 23347c478bd9Sstevel@tonic-gate * 23357c478bd9Sstevel@tonic-gate * Go wait. We come back having accounted 23367c478bd9Sstevel@tonic-gate * for the memory. 
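 * (The waiting itself is page_create_wait()'s job: it coalesces
 * the per-bucket pcf counts and, when PG_WAIT allows it, sleeps
 * on freemem_cv until the request can be satisfied; see that
 * routine for the details.)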
23377c478bd9Sstevel@tonic-gate */ 23387c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[1]); 23397c478bd9Sstevel@tonic-gate if (!page_create_wait(npages, flags)) { 23407c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[2]); 23417c478bd9Sstevel@tonic-gate return (NULL); 23427c478bd9Sstevel@tonic-gate } 23437c478bd9Sstevel@tonic-gate } 23447c478bd9Sstevel@tonic-gate 23457c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 23467c478bd9Sstevel@tonic-gate "page_create_success:vp %p off %llx", vp, off); 23477c478bd9Sstevel@tonic-gate 23487c478bd9Sstevel@tonic-gate /* 23497c478bd9Sstevel@tonic-gate * If satisfying this request has left us with too little 23507c478bd9Sstevel@tonic-gate * memory, start the wheels turning to get some back. The 23517c478bd9Sstevel@tonic-gate * first clause of the test prevents waking up the pageout 23527c478bd9Sstevel@tonic-gate * daemon in situations where it would decide that there's 23537c478bd9Sstevel@tonic-gate * nothing to do. 23547c478bd9Sstevel@tonic-gate */ 23557c478bd9Sstevel@tonic-gate if (nscan < desscan && freemem < minfree) { 23567c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 23577c478bd9Sstevel@tonic-gate "pageout_cv_signal:freemem %ld", freemem); 23587c478bd9Sstevel@tonic-gate cv_signal(&proc_pageout->p_cv); 23597c478bd9Sstevel@tonic-gate } 23607c478bd9Sstevel@tonic-gate 23617c478bd9Sstevel@tonic-gate /* 23627c478bd9Sstevel@tonic-gate * Loop around collecting the requested number of pages. 23637c478bd9Sstevel@tonic-gate * Most of the time, we have to `create' a new page. With 23647c478bd9Sstevel@tonic-gate * this in mind, pull the page off the free list before 23657c478bd9Sstevel@tonic-gate * getting the hash lock. This will minimize the hash 23667c478bd9Sstevel@tonic-gate * lock hold time, nesting, and the like. If it turns 23677c478bd9Sstevel@tonic-gate * out we don't need the page, we put it back at the end. 23687c478bd9Sstevel@tonic-gate */ 23697c478bd9Sstevel@tonic-gate while (npages--) { 23707c478bd9Sstevel@tonic-gate page_t *pp; 23717c478bd9Sstevel@tonic-gate kmutex_t *phm = NULL; 23727c478bd9Sstevel@tonic-gate ulong_t index; 23737c478bd9Sstevel@tonic-gate 23747c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off); 23757c478bd9Sstevel@tonic-gate top: 23767c478bd9Sstevel@tonic-gate ASSERT(phm == NULL); 23777c478bd9Sstevel@tonic-gate ASSERT(index == PAGE_HASH_FUNC(vp, off)); 23787c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 23797c478bd9Sstevel@tonic-gate 23807c478bd9Sstevel@tonic-gate if (npp == NULL) { 23817c478bd9Sstevel@tonic-gate /* 23827c478bd9Sstevel@tonic-gate * Try to get a page from the freelist (i.e., 23837c478bd9Sstevel@tonic-gate * a page with no [vp, off] tag). If that 23847c478bd9Sstevel@tonic-gate * fails, use the cachelist. 23857c478bd9Sstevel@tonic-gate * 23867c478bd9Sstevel@tonic-gate * During the first attempt at both the free 23877c478bd9Sstevel@tonic-gate * and cache lists we try for the correct color. 23887c478bd9Sstevel@tonic-gate */ 23897c478bd9Sstevel@tonic-gate /* 23907c478bd9Sstevel@tonic-gate * XXXX-how do we deal with virtually indexed 23917c478bd9Sstevel@tonic-gate * caches and colors?
23927c478bd9Sstevel@tonic-gate */ 23937c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[4]); 23947c478bd9Sstevel@tonic-gate /* 23957c478bd9Sstevel@tonic-gate * Get lgroup to allocate next page of shared memory 23967c478bd9Sstevel@tonic-gate * from and use it to specify where to allocate 23977c478bd9Sstevel@tonic-gate * the physical memory 23987c478bd9Sstevel@tonic-gate */ 23997c478bd9Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2400d94ffb28Sjmcp npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 24017c478bd9Sstevel@tonic-gate flags | PG_MATCH_COLOR, lgrp); 24027c478bd9Sstevel@tonic-gate if (npp == NULL) { 24037c478bd9Sstevel@tonic-gate npp = page_get_cachelist(vp, off, seg, 24047c478bd9Sstevel@tonic-gate vaddr, flags | PG_MATCH_COLOR, lgrp); 24057c478bd9Sstevel@tonic-gate if (npp == NULL) { 24067c478bd9Sstevel@tonic-gate npp = page_create_get_something(vp, 24077c478bd9Sstevel@tonic-gate off, seg, vaddr, 24087c478bd9Sstevel@tonic-gate flags & ~PG_MATCH_COLOR); 24097c478bd9Sstevel@tonic-gate } 24107c478bd9Sstevel@tonic-gate 24117c478bd9Sstevel@tonic-gate if (PP_ISAGED(npp) == 0) { 24127c478bd9Sstevel@tonic-gate /* 24137c478bd9Sstevel@tonic-gate * Since this page came from the 24147c478bd9Sstevel@tonic-gate * cachelist, we must destroy the 24157c478bd9Sstevel@tonic-gate * old vnode association. 24167c478bd9Sstevel@tonic-gate */ 24177c478bd9Sstevel@tonic-gate page_hashout(npp, NULL); 24187c478bd9Sstevel@tonic-gate } 24197c478bd9Sstevel@tonic-gate } 24207c478bd9Sstevel@tonic-gate } 24217c478bd9Sstevel@tonic-gate 24227c478bd9Sstevel@tonic-gate /* 24237c478bd9Sstevel@tonic-gate * We own this page! 24247c478bd9Sstevel@tonic-gate */ 24257c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(npp)); 24267c478bd9Sstevel@tonic-gate ASSERT(npp->p_vnode == NULL); 24277c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(npp)); 24287c478bd9Sstevel@tonic-gate PP_CLRFREE(npp); 24297c478bd9Sstevel@tonic-gate PP_CLRAGED(npp); 24307c478bd9Sstevel@tonic-gate 24317c478bd9Sstevel@tonic-gate /* 24327c478bd9Sstevel@tonic-gate * Here we have a page in our hot little mitts and are 24337c478bd9Sstevel@tonic-gate * just waiting to stuff it on the appropriate lists. 24347c478bd9Sstevel@tonic-gate * Get the mutex and check to see if it really does 24357c478bd9Sstevel@tonic-gate * not exist. 24367c478bd9Sstevel@tonic-gate */ 24377c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index); 24387c478bd9Sstevel@tonic-gate mutex_enter(phm); 2439e7c874afSJosef 'Jeff' Sipek pp = page_hash_search(index, vp, off); 24407c478bd9Sstevel@tonic-gate if (pp == NULL) { 24417c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_new); 24427c478bd9Sstevel@tonic-gate pp = npp; 24437c478bd9Sstevel@tonic-gate npp = NULL; 24447c478bd9Sstevel@tonic-gate if (!page_hashin(pp, vp, off, phm)) { 24457c478bd9Sstevel@tonic-gate /* 24467c478bd9Sstevel@tonic-gate * Since we hold the page hash mutex and 24477c478bd9Sstevel@tonic-gate * just searched for this page, page_hashin 24487c478bd9Sstevel@tonic-gate * had better not fail. If it does, that 24497c478bd9Sstevel@tonic-gate * means some thread did not follow the 24507c478bd9Sstevel@tonic-gate * page hash mutex rules. Panic now and 24517c478bd9Sstevel@tonic-gate * get it over with. As usual, go down 24527c478bd9Sstevel@tonic-gate * holding all the locks.
24537c478bd9Sstevel@tonic-gate */ 24547c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm)); 24557c478bd9Sstevel@tonic-gate panic("page_create: " 24567c478bd9Sstevel@tonic-gate "hashin failed %p %p %llx %p", 24577c478bd9Sstevel@tonic-gate (void *)pp, (void *)vp, off, (void *)phm); 24587c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 24597c478bd9Sstevel@tonic-gate } 24607c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm)); 24617c478bd9Sstevel@tonic-gate mutex_exit(phm); 24627c478bd9Sstevel@tonic-gate phm = NULL; 24637c478bd9Sstevel@tonic-gate 24647c478bd9Sstevel@tonic-gate /* 24657c478bd9Sstevel@tonic-gate * Hat layer locking need not be done to set 24667c478bd9Sstevel@tonic-gate * the following bits since the page is not hashed 24677c478bd9Sstevel@tonic-gate * and was on the free list (i.e., had no mappings). 24687c478bd9Sstevel@tonic-gate * 24697c478bd9Sstevel@tonic-gate * Set the reference bit to protect 24707c478bd9Sstevel@tonic-gate * against immediate pageout 24717c478bd9Sstevel@tonic-gate * 24727c478bd9Sstevel@tonic-gate * XXXmh modify freelist code to set reference 24737c478bd9Sstevel@tonic-gate * bit so we don't have to do it here. 24747c478bd9Sstevel@tonic-gate */ 24757c478bd9Sstevel@tonic-gate page_set_props(pp, P_REF); 24767c478bd9Sstevel@tonic-gate found_on_free++; 24777c478bd9Sstevel@tonic-gate } else { 24787c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_exists); 24797c478bd9Sstevel@tonic-gate if (flags & PG_EXCL) { 24807c478bd9Sstevel@tonic-gate /* 24817c478bd9Sstevel@tonic-gate * Found an existing page, and the caller 24827c478bd9Sstevel@tonic-gate * wanted all new pages. Undo all of the work 24837c478bd9Sstevel@tonic-gate * we have done. 24847c478bd9Sstevel@tonic-gate */ 24857c478bd9Sstevel@tonic-gate mutex_exit(phm); 24867c478bd9Sstevel@tonic-gate phm = NULL; 24877c478bd9Sstevel@tonic-gate while (plist != NULL) { 24887c478bd9Sstevel@tonic-gate pp = plist; 24897c478bd9Sstevel@tonic-gate page_sub(&plist, pp); 24907c478bd9Sstevel@tonic-gate page_io_unlock(pp); 24917c478bd9Sstevel@tonic-gate /* large pages should not end up here */ 24927c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 24937c478bd9Sstevel@tonic-gate /*LINTED: constant in conditional ctx*/ 24947c478bd9Sstevel@tonic-gate VN_DISPOSE(pp, B_INVAL, 0, kcred); 24957c478bd9Sstevel@tonic-gate } 24967c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_found_one); 24977c478bd9Sstevel@tonic-gate goto fail; 24987c478bd9Sstevel@tonic-gate } 24997c478bd9Sstevel@tonic-gate ASSERT(flags & PG_WAIT); 25007c478bd9Sstevel@tonic-gate if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 25017c478bd9Sstevel@tonic-gate /* 25027c478bd9Sstevel@tonic-gate * Start all over again if we blocked trying 25037c478bd9Sstevel@tonic-gate * to lock the page. 
25047c478bd9Sstevel@tonic-gate */ 25057c478bd9Sstevel@tonic-gate mutex_exit(phm); 25067c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_page_lock_failed); 25077c478bd9Sstevel@tonic-gate phm = NULL; 25087c478bd9Sstevel@tonic-gate goto top; 25097c478bd9Sstevel@tonic-gate } 25107c478bd9Sstevel@tonic-gate mutex_exit(phm); 25117c478bd9Sstevel@tonic-gate phm = NULL; 25127c478bd9Sstevel@tonic-gate 25137c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp)) { 25147c478bd9Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0); 25157c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_get_cache); 25167c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST); 25177c478bd9Sstevel@tonic-gate PP_CLRFREE(pp); 25187c478bd9Sstevel@tonic-gate found_on_free++; 25197c478bd9Sstevel@tonic-gate } 25207c478bd9Sstevel@tonic-gate } 25217c478bd9Sstevel@tonic-gate 25227c478bd9Sstevel@tonic-gate /* 25237c478bd9Sstevel@tonic-gate * Got a page! It is locked. Acquire the i/o 25247c478bd9Sstevel@tonic-gate * lock since we are going to use the p_next and 25257c478bd9Sstevel@tonic-gate * p_prev fields to link the requested pages together. 25267c478bd9Sstevel@tonic-gate */ 25277c478bd9Sstevel@tonic-gate page_io_lock(pp); 25287c478bd9Sstevel@tonic-gate page_add(&plist, pp); 25297c478bd9Sstevel@tonic-gate plist = plist->p_next; 25307c478bd9Sstevel@tonic-gate off += PAGESIZE; 25317c478bd9Sstevel@tonic-gate vaddr += PAGESIZE; 25327c478bd9Sstevel@tonic-gate } 25337c478bd9Sstevel@tonic-gate 25347c478bd9Sstevel@tonic-gate ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 25357c478bd9Sstevel@tonic-gate fail: 25367c478bd9Sstevel@tonic-gate if (npp != NULL) { 25377c478bd9Sstevel@tonic-gate /* 25387c478bd9Sstevel@tonic-gate * Did not need this page after all. 25397c478bd9Sstevel@tonic-gate * Put it back on the free list. 
25407c478bd9Sstevel@tonic-gate */ 25417c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_putbacks); 25427c478bd9Sstevel@tonic-gate PP_SETFREE(npp); 25437c478bd9Sstevel@tonic-gate PP_SETAGED(npp); 25447c478bd9Sstevel@tonic-gate npp->p_offset = (u_offset_t)-1; 25457c478bd9Sstevel@tonic-gate page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 25467c478bd9Sstevel@tonic-gate page_unlock(npp); 2547d94ffb28Sjmcp 25487c478bd9Sstevel@tonic-gate } 25497c478bd9Sstevel@tonic-gate 25507c478bd9Sstevel@tonic-gate ASSERT(pages_req >= found_on_free); 25517c478bd9Sstevel@tonic-gate 25527c478bd9Sstevel@tonic-gate { 25537c478bd9Sstevel@tonic-gate uint_t overshoot = (uint_t)(pages_req - found_on_free); 25547c478bd9Sstevel@tonic-gate 25557c478bd9Sstevel@tonic-gate if (overshoot) { 25567c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_overshoot); 255706fb6a36Sdv142724 p = &pcf[PCF_INDEX()]; 25587c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 25597c478bd9Sstevel@tonic-gate if (p->pcf_block) { 25607c478bd9Sstevel@tonic-gate p->pcf_reserve += overshoot; 25617c478bd9Sstevel@tonic-gate } else { 25627c478bd9Sstevel@tonic-gate p->pcf_count += overshoot; 25637c478bd9Sstevel@tonic-gate if (p->pcf_wait) { 25647c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 25657c478bd9Sstevel@tonic-gate if (freemem_wait) { 25667c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv); 25677c478bd9Sstevel@tonic-gate p->pcf_wait--; 25687c478bd9Sstevel@tonic-gate } else { 25697c478bd9Sstevel@tonic-gate p->pcf_wait = 0; 25707c478bd9Sstevel@tonic-gate } 25717c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 25727c478bd9Sstevel@tonic-gate } 25737c478bd9Sstevel@tonic-gate } 25747c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 25757c478bd9Sstevel@tonic-gate /* freemem is approximate, so this test OK */ 25767c478bd9Sstevel@tonic-gate if (!p->pcf_block) 25777c478bd9Sstevel@tonic-gate freemem += overshoot; 25787c478bd9Sstevel@tonic-gate } 25797c478bd9Sstevel@tonic-gate } 25807c478bd9Sstevel@tonic-gate 25817c478bd9Sstevel@tonic-gate return (plist); 25827c478bd9Sstevel@tonic-gate } 25837c478bd9Sstevel@tonic-gate 25847c478bd9Sstevel@tonic-gate /* 25857c478bd9Sstevel@tonic-gate * One or more constituent pages of this large page have been marked 25867c478bd9Sstevel@tonic-gate * toxic. Simply demote the large page to PAGESIZE pages and let 25877c478bd9Sstevel@tonic-gate * page_free() handle it. This routine should only be called by 25887c478bd9Sstevel@tonic-gate * large page free routines (page_free_pages() and page_destroy_pages()). 25897c478bd9Sstevel@tonic-gate * All pages are locked SE_EXCL and have already been marked free.
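 * (Each constituent's p_szc is cleared before the pages are freed,
 * so page_free() below sees ordinary PAGESIZE pages.)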
25907c478bd9Sstevel@tonic-gate */ 25917c478bd9Sstevel@tonic-gate static void 25927c478bd9Sstevel@tonic-gate page_free_toxic_pages(page_t *rootpp) 25937c478bd9Sstevel@tonic-gate { 25947c478bd9Sstevel@tonic-gate page_t *tpp; 25957c478bd9Sstevel@tonic-gate pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 25967c478bd9Sstevel@tonic-gate uint_t szc = rootpp->p_szc; 25977c478bd9Sstevel@tonic-gate 25987c478bd9Sstevel@tonic-gate for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 25997c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 26007c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) && 26017c478bd9Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr); 26027c478bd9Sstevel@tonic-gate tpp->p_szc = 0; 26037c478bd9Sstevel@tonic-gate } 26047c478bd9Sstevel@tonic-gate 26057c478bd9Sstevel@tonic-gate while (rootpp != NULL) { 26067c478bd9Sstevel@tonic-gate tpp = rootpp; 26077c478bd9Sstevel@tonic-gate page_sub(&rootpp, tpp); 26087c478bd9Sstevel@tonic-gate ASSERT(PP_ISFREE(tpp)); 26097c478bd9Sstevel@tonic-gate PP_CLRFREE(tpp); 26107c478bd9Sstevel@tonic-gate page_free(tpp, 1); 26117c478bd9Sstevel@tonic-gate } 26127c478bd9Sstevel@tonic-gate } 26137c478bd9Sstevel@tonic-gate 26147c478bd9Sstevel@tonic-gate /* 26157c478bd9Sstevel@tonic-gate * Put page on the "free" list. 26167c478bd9Sstevel@tonic-gate * The free list is really two lists maintained by 26177c478bd9Sstevel@tonic-gate * the PSM of whatever machine we happen to be on. 26187c478bd9Sstevel@tonic-gate */ 26197c478bd9Sstevel@tonic-gate void 26207c478bd9Sstevel@tonic-gate page_free(page_t *pp, int dontneed) 26217c478bd9Sstevel@tonic-gate { 26227c478bd9Sstevel@tonic-gate struct pcf *p; 26237c478bd9Sstevel@tonic-gate uint_t pcf_index; 26247c478bd9Sstevel@tonic-gate 26257c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) && 26267c478bd9Sstevel@tonic-gate !page_iolock_assert(pp)) || panicstr); 26277c478bd9Sstevel@tonic-gate 26287c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp)) { 26297c478bd9Sstevel@tonic-gate panic("page_free: page %p is free", (void *)pp); 26307c478bd9Sstevel@tonic-gate } 26317c478bd9Sstevel@tonic-gate 26327c478bd9Sstevel@tonic-gate if (pp->p_szc != 0) { 26337c478bd9Sstevel@tonic-gate if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2634ad23a2dbSjohansen PP_ISKAS(pp)) { 26357c478bd9Sstevel@tonic-gate panic("page_free: anon or kernel " 26367c478bd9Sstevel@tonic-gate "or no vnode large page %p", (void *)pp); 26377c478bd9Sstevel@tonic-gate } 26387c478bd9Sstevel@tonic-gate page_demote_vp_pages(pp); 26397c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 26407c478bd9Sstevel@tonic-gate } 26417c478bd9Sstevel@tonic-gate 26427c478bd9Sstevel@tonic-gate /* 26437c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired to examine these 26447c478bd9Sstevel@tonic-gate * fields since the page has an "exclusive" lock. 
26457c478bd9Sstevel@tonic-gate */ 264607b65a64Saguzovsk if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 264707b65a64Saguzovsk pp->p_slckcnt != 0) { 264807b65a64Saguzovsk panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 26498793b36bSNick Todd "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt, 265007b65a64Saguzovsk pp->p_cowcnt, pp->p_slckcnt); 26517c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 26527c478bd9Sstevel@tonic-gate } 26537c478bd9Sstevel@tonic-gate 26547c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(pp)); 26557c478bd9Sstevel@tonic-gate 26567c478bd9Sstevel@tonic-gate PP_SETFREE(pp); 26577c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 26587c478bd9Sstevel@tonic-gate !hat_ismod(pp)); 26599d0d62adSJason Beloro page_clr_all_props(pp); 26607c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(pp)); 26617c478bd9Sstevel@tonic-gate 26627c478bd9Sstevel@tonic-gate /* 26637c478bd9Sstevel@tonic-gate * Now we add the page to the head of the free list. 26647c478bd9Sstevel@tonic-gate * But if this page is associated with a paged vnode 26657c478bd9Sstevel@tonic-gate * then we adjust the head forward so that the page is 26667c478bd9Sstevel@tonic-gate * effectively at the end of the list. 26677c478bd9Sstevel@tonic-gate */ 26687c478bd9Sstevel@tonic-gate if (pp->p_vnode == NULL) { 26697c478bd9Sstevel@tonic-gate /* 26707c478bd9Sstevel@tonic-gate * Page has no identity, put it on the free list. 26717c478bd9Sstevel@tonic-gate */ 26727c478bd9Sstevel@tonic-gate PP_SETAGED(pp); 26737c478bd9Sstevel@tonic-gate pp->p_offset = (u_offset_t)-1; 26747c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 26757c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_free); 26767c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 26777c478bd9Sstevel@tonic-gate "page_free_free:pp %p", pp); 26787c478bd9Sstevel@tonic-gate } else { 26797c478bd9Sstevel@tonic-gate PP_CLRAGED(pp); 26807c478bd9Sstevel@tonic-gate 26818d4235fbSJosef 'Jeff' Sipek if (!dontneed) { 26827c478bd9Sstevel@tonic-gate /* move it to the tail of the list */ 26837c478bd9Sstevel@tonic-gate page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 26847c478bd9Sstevel@tonic-gate 26857c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_cache); 26867c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 26877c478bd9Sstevel@tonic-gate "page_free_cache_tail:pp %p", pp); 26887c478bd9Sstevel@tonic-gate } else { 26897c478bd9Sstevel@tonic-gate page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 26907c478bd9Sstevel@tonic-gate 26917c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_dontneed); 26927c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 26937c478bd9Sstevel@tonic-gate "page_free_cache_head:pp %p", pp); 26947c478bd9Sstevel@tonic-gate } 26957c478bd9Sstevel@tonic-gate } 26967c478bd9Sstevel@tonic-gate page_unlock(pp); 26977c478bd9Sstevel@tonic-gate 26987c478bd9Sstevel@tonic-gate /* 26997c478bd9Sstevel@tonic-gate * Now do the `freemem' accounting. 
27007c478bd9Sstevel@tonic-gate */ 27017c478bd9Sstevel@tonic-gate pcf_index = PCF_INDEX(); 27027c478bd9Sstevel@tonic-gate p = &pcf[pcf_index]; 27037c478bd9Sstevel@tonic-gate 27047c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 27057c478bd9Sstevel@tonic-gate if (p->pcf_block) { 27067c478bd9Sstevel@tonic-gate p->pcf_reserve += 1; 27077c478bd9Sstevel@tonic-gate } else { 27087c478bd9Sstevel@tonic-gate p->pcf_count += 1; 27097c478bd9Sstevel@tonic-gate if (p->pcf_wait) { 27107c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 27117c478bd9Sstevel@tonic-gate /* 27127c478bd9Sstevel@tonic-gate * Check to see if some other thread 27137c478bd9Sstevel@tonic-gate * is actually waiting. Another bucket 27147c478bd9Sstevel@tonic-gate * may have woken it up by now. If there 27157c478bd9Sstevel@tonic-gate * are no waiters, then set our pcf_wait 27167c478bd9Sstevel@tonic-gate * count to zero to avoid coming in here 27177c478bd9Sstevel@tonic-gate * next time. Also, since only one page 27187c478bd9Sstevel@tonic-gate * was put on the free list, just wake 27197c478bd9Sstevel@tonic-gate * up one waiter. 27207c478bd9Sstevel@tonic-gate */ 27217c478bd9Sstevel@tonic-gate if (freemem_wait) { 27227c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv); 27237c478bd9Sstevel@tonic-gate p->pcf_wait--; 27247c478bd9Sstevel@tonic-gate } else { 27257c478bd9Sstevel@tonic-gate p->pcf_wait = 0; 27267c478bd9Sstevel@tonic-gate } 27277c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 27287c478bd9Sstevel@tonic-gate } 27297c478bd9Sstevel@tonic-gate } 27307c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 27317c478bd9Sstevel@tonic-gate 27327c478bd9Sstevel@tonic-gate /* freemem is approximate, so this test OK */ 27337c478bd9Sstevel@tonic-gate if (!p->pcf_block) 27347c478bd9Sstevel@tonic-gate freemem += 1; 27357c478bd9Sstevel@tonic-gate } 27367c478bd9Sstevel@tonic-gate 27377c478bd9Sstevel@tonic-gate /* 27387c478bd9Sstevel@tonic-gate * Put page on the "free" list during initial startup. 27397c478bd9Sstevel@tonic-gate * This happens during initial single threaded execution. 27407c478bd9Sstevel@tonic-gate */ 27417c478bd9Sstevel@tonic-gate void 27427c478bd9Sstevel@tonic-gate page_free_at_startup(page_t *pp) 27437c478bd9Sstevel@tonic-gate { 27447c478bd9Sstevel@tonic-gate struct pcf *p; 27457c478bd9Sstevel@tonic-gate uint_t pcf_index; 27467c478bd9Sstevel@tonic-gate 27477c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 27487c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_free); 27497c478bd9Sstevel@tonic-gate 27507c478bd9Sstevel@tonic-gate /* 27517c478bd9Sstevel@tonic-gate * Now do the `freemem' accounting.
27527c478bd9Sstevel@tonic-gate */ 27537c478bd9Sstevel@tonic-gate pcf_index = PCF_INDEX(); 27547c478bd9Sstevel@tonic-gate p = &pcf[pcf_index]; 27557c478bd9Sstevel@tonic-gate 27567c478bd9Sstevel@tonic-gate ASSERT(p->pcf_block == 0); 27577c478bd9Sstevel@tonic-gate ASSERT(p->pcf_wait == 0); 27587c478bd9Sstevel@tonic-gate p->pcf_count += 1; 27597c478bd9Sstevel@tonic-gate 27607c478bd9Sstevel@tonic-gate /* freemem is approximate, so this is OK */ 27617c478bd9Sstevel@tonic-gate freemem += 1; 27627c478bd9Sstevel@tonic-gate } 27637c478bd9Sstevel@tonic-gate 27647c478bd9Sstevel@tonic-gate void 27657c478bd9Sstevel@tonic-gate page_free_pages(page_t *pp) 27667c478bd9Sstevel@tonic-gate { 27677c478bd9Sstevel@tonic-gate page_t *tpp, *rootpp = NULL; 27687c478bd9Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 27697c478bd9Sstevel@tonic-gate pgcnt_t i; 27707c478bd9Sstevel@tonic-gate uint_t szc = pp->p_szc; 27717c478bd9Sstevel@tonic-gate 27727c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_pages); 27737c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 27747c478bd9Sstevel@tonic-gate "page_free_free:pp %p", pp); 27757c478bd9Sstevel@tonic-gate 27767c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 27777c478bd9Sstevel@tonic-gate if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 27787c478bd9Sstevel@tonic-gate panic("page_free_pages: not root page %p", (void *)pp); 27797c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 27807c478bd9Sstevel@tonic-gate } 27817c478bd9Sstevel@tonic-gate 2782affbd3ccSkchow for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 27837c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) && 27847c478bd9Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr); 27857c478bd9Sstevel@tonic-gate if (PP_ISFREE(tpp)) { 27867c478bd9Sstevel@tonic-gate panic("page_free_pages: page %p is free", (void *)tpp); 27877c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 27887c478bd9Sstevel@tonic-gate } 27897c478bd9Sstevel@tonic-gate if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 279007b65a64Saguzovsk tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 27917c478bd9Sstevel@tonic-gate panic("page_free_pages %p", (void *)tpp); 27927c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 27937c478bd9Sstevel@tonic-gate } 27947c478bd9Sstevel@tonic-gate 27957c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(tpp)); 27967c478bd9Sstevel@tonic-gate ASSERT(tpp->p_vnode == NULL); 27977c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 27987c478bd9Sstevel@tonic-gate 27997c478bd9Sstevel@tonic-gate PP_SETFREE(tpp); 28009d0d62adSJason Beloro page_clr_all_props(tpp); 28017c478bd9Sstevel@tonic-gate PP_SETAGED(tpp); 28027c478bd9Sstevel@tonic-gate tpp->p_offset = (u_offset_t)-1; 28037c478bd9Sstevel@tonic-gate ASSERT(tpp->p_next == tpp); 28047c478bd9Sstevel@tonic-gate ASSERT(tpp->p_prev == tpp); 28057c478bd9Sstevel@tonic-gate page_list_concat(&rootpp, &tpp); 28067c478bd9Sstevel@tonic-gate } 28077c478bd9Sstevel@tonic-gate ASSERT(rootpp == pp); 28087c478bd9Sstevel@tonic-gate 28097c478bd9Sstevel@tonic-gate page_list_add_pages(rootpp, 0); 28107c478bd9Sstevel@tonic-gate page_create_putback(pgcnt); 28117c478bd9Sstevel@tonic-gate } 28127c478bd9Sstevel@tonic-gate 28137c478bd9Sstevel@tonic-gate int free_pages = 1; 28147c478bd9Sstevel@tonic-gate 28157c478bd9Sstevel@tonic-gate /* 28167c478bd9Sstevel@tonic-gate * This routine attempts to return pages to the cachelist via page_release(). 
28177c478bd9Sstevel@tonic-gate * It does not *have* to be successful in all cases, since the pageout scanner 28187c478bd9Sstevel@tonic-gate * will catch any pages it misses. It does need to be fast and not introduce 28197c478bd9Sstevel@tonic-gate * too much overhead. 28207c478bd9Sstevel@tonic-gate * 28217c478bd9Sstevel@tonic-gate * If a page isn't found on the unlocked sweep of the page_hash bucket, we 28227c478bd9Sstevel@tonic-gate * don't lock and retry. This is OK, since the page scanner will eventually 28237c478bd9Sstevel@tonic-gate * find any page we miss in free_vp_pages(). 28247c478bd9Sstevel@tonic-gate */ 28257c478bd9Sstevel@tonic-gate void 28267c478bd9Sstevel@tonic-gate free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 28277c478bd9Sstevel@tonic-gate { 28287c478bd9Sstevel@tonic-gate page_t *pp; 28297c478bd9Sstevel@tonic-gate u_offset_t eoff; 28307c478bd9Sstevel@tonic-gate extern int swap_in_range(vnode_t *, u_offset_t, size_t); 28317c478bd9Sstevel@tonic-gate 28327c478bd9Sstevel@tonic-gate eoff = off + len; 28337c478bd9Sstevel@tonic-gate 28347c478bd9Sstevel@tonic-gate if (free_pages == 0) 28357c478bd9Sstevel@tonic-gate return; 28367c478bd9Sstevel@tonic-gate if (swap_in_range(vp, off, len)) 28377c478bd9Sstevel@tonic-gate return; 28387c478bd9Sstevel@tonic-gate 28397c478bd9Sstevel@tonic-gate for (; off < eoff; off += PAGESIZE) { 28407c478bd9Sstevel@tonic-gate 28417c478bd9Sstevel@tonic-gate /* 28427c478bd9Sstevel@tonic-gate * find the page using a fast, but inexact search. It'll be OK 28437c478bd9Sstevel@tonic-gate * if a few pages slip through the cracks here. 28447c478bd9Sstevel@tonic-gate */ 28457c478bd9Sstevel@tonic-gate pp = page_exists(vp, off); 28467c478bd9Sstevel@tonic-gate 28477c478bd9Sstevel@tonic-gate /* 28487c478bd9Sstevel@tonic-gate * If we didn't find the page (it may not exist), the page 28497c478bd9Sstevel@tonic-gate * is free, it appears to still be in use (shared), or we 28507c478bd9Sstevel@tonic-gate * can't lock it, just give up. 28517c478bd9Sstevel@tonic-gate */ 28527c478bd9Sstevel@tonic-gate if (pp == NULL || 28537c478bd9Sstevel@tonic-gate PP_ISFREE(pp) || 28547c478bd9Sstevel@tonic-gate page_share_cnt(pp) > 0 || 28557c478bd9Sstevel@tonic-gate !page_trylock(pp, SE_EXCL)) 28567c478bd9Sstevel@tonic-gate continue; 28577c478bd9Sstevel@tonic-gate 28587c478bd9Sstevel@tonic-gate /* 28597c478bd9Sstevel@tonic-gate * Once we have locked pp, verify that it's still the 28607c478bd9Sstevel@tonic-gate * correct page and not already free. 28617c478bd9Sstevel@tonic-gate */ 28627c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 28637c478bd9Sstevel@tonic-gate if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 28647c478bd9Sstevel@tonic-gate page_unlock(pp); 28657c478bd9Sstevel@tonic-gate continue; 28667c478bd9Sstevel@tonic-gate } 28677c478bd9Sstevel@tonic-gate 28687c478bd9Sstevel@tonic-gate /* 28697c478bd9Sstevel@tonic-gate * try to release the page... 28707c478bd9Sstevel@tonic-gate */ 28717c478bd9Sstevel@tonic-gate (void) page_release(pp, 1); 28727c478bd9Sstevel@tonic-gate } 28737c478bd9Sstevel@tonic-gate } 28747c478bd9Sstevel@tonic-gate 28757c478bd9Sstevel@tonic-gate /* 28767c478bd9Sstevel@tonic-gate * Reclaim the given page from the free list. 28776e4dd838Smec * If pp is part of a large page, only the given constituent page is reclaimed 28786e4dd838Smec * and the large page it belonged to will be demoted. This can only happen 28796e4dd838Smec * if the page is not on the cachelist.
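 *
 * A minimal caller sketch (illustrative only; the surrounding code
 * is hypothetical, not from this file):
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		if (!PP_ISFREE(pp)) {
 *			page_unlock(pp);	(not free; not ours)
 *		} else if (page_reclaim(pp, NULL)) {
 *			(pp is now off the free list, still SE_EXCL)
 *		} else {
 *			(page_reclaim() already dropped the lock)
 *		}
 *	}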
28806e4dd838Smec * 28817c478bd9Sstevel@tonic-gate * Returns 1 on success or 0 on failure. 28827c478bd9Sstevel@tonic-gate * 28837c478bd9Sstevel@tonic-gate * The page is unlocked if it can't be reclaimed (when freemem == 0). 28847c478bd9Sstevel@tonic-gate * If `lock' is non-null, it will be dropped and re-acquired if 28857c478bd9Sstevel@tonic-gate * the routine must wait while freemem is 0. 28867c478bd9Sstevel@tonic-gate * 28877c478bd9Sstevel@tonic-gate * As it turns out, boot_getpages() does this. It picks a page, 28887c478bd9Sstevel@tonic-gate * based on where OBP mapped in some address, gets its pfn, searches 28897c478bd9Sstevel@tonic-gate * the memsegs, locks the page, then pulls it off the free list! 28907c478bd9Sstevel@tonic-gate */ 28917c478bd9Sstevel@tonic-gate int 28927c478bd9Sstevel@tonic-gate page_reclaim(page_t *pp, kmutex_t *lock) 28937c478bd9Sstevel@tonic-gate { 28947c478bd9Sstevel@tonic-gate struct pcf *p; 28957c478bd9Sstevel@tonic-gate struct cpu *cpup; 28966e4dd838Smec int enough; 28977c478bd9Sstevel@tonic-gate uint_t i; 28987c478bd9Sstevel@tonic-gate 28997c478bd9Sstevel@tonic-gate ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 29007c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2901db874c57Selowe 29027c478bd9Sstevel@tonic-gate /* 29037c478bd9Sstevel@tonic-gate * If `freemem' is 0, we cannot reclaim this page from the 29047c478bd9Sstevel@tonic-gate * freelist, so release every lock we might hold: the page, 29057c478bd9Sstevel@tonic-gate * and the `lock' before blocking. 29067c478bd9Sstevel@tonic-gate * 29077c478bd9Sstevel@tonic-gate * The only way `freemem' can become 0 while there are pages 29087c478bd9Sstevel@tonic-gate * marked free (have their p->p_free bit set) is when the 29097c478bd9Sstevel@tonic-gate * system is low on memory and doing a page_create(). In 29107c478bd9Sstevel@tonic-gate * order to guarantee that once page_create() starts acquiring 29117c478bd9Sstevel@tonic-gate * pages it will be able to get all that it needs since `freemem' 29127c478bd9Sstevel@tonic-gate * was decreased by the requested amount. So, we need to release 29137c478bd9Sstevel@tonic-gate * this page, and let page_create() have it. 29147c478bd9Sstevel@tonic-gate * 29157c478bd9Sstevel@tonic-gate * Since `freemem' being zero is not supposed to happen, just 29167c478bd9Sstevel@tonic-gate * use the usual hash stuff as a starting point. If that bucket 29177c478bd9Sstevel@tonic-gate * is empty, then assume the worst, and start at the beginning 29187c478bd9Sstevel@tonic-gate * of the pcf array. If we always start at the beginning 29197c478bd9Sstevel@tonic-gate * when acquiring more than one pcf lock, there won't be any 29207c478bd9Sstevel@tonic-gate * deadlock problems. 29217c478bd9Sstevel@tonic-gate */ 29227c478bd9Sstevel@tonic-gate 29237c478bd9Sstevel@tonic-gate /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 29247c478bd9Sstevel@tonic-gate 29256e4dd838Smec if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 29267c478bd9Sstevel@tonic-gate pcf_acquire_all(); 29277c478bd9Sstevel@tonic-gate goto page_reclaim_nomem; 29287c478bd9Sstevel@tonic-gate } 29297c478bd9Sstevel@tonic-gate 293006fb6a36Sdv142724 enough = pcf_decrement_bucket(1); 29317c478bd9Sstevel@tonic-gate 29326e4dd838Smec if (!enough) { 29337c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_reclaim_zero); 29347c478bd9Sstevel@tonic-gate /* 29357c478bd9Sstevel@tonic-gate * Check again. 
It's possible that some other thread 29367c478bd9Sstevel@tonic-gate * could have been right behind us, and added one 29377c478bd9Sstevel@tonic-gate * to a list somewhere. Acquire each of the pcf locks 29387c478bd9Sstevel@tonic-gate * until we find a page. 29397c478bd9Sstevel@tonic-gate */ 29407c478bd9Sstevel@tonic-gate p = pcf; 294106fb6a36Sdv142724 for (i = 0; i < pcf_fanout; i++) { 29427c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock); 29436e4dd838Smec if (p->pcf_count >= 1) { 29446e4dd838Smec p->pcf_count -= 1; 29455797d5ddSDavid Valin /* 29465797d5ddSDavid Valin * freemem is not protected by any lock. Thus, 29475797d5ddSDavid Valin * we cannot have any assertion containing 29485797d5ddSDavid Valin * freemem here. 29495797d5ddSDavid Valin */ 29505797d5ddSDavid Valin freemem -= 1; 29516e4dd838Smec enough = 1; 29527c478bd9Sstevel@tonic-gate break; 29537c478bd9Sstevel@tonic-gate } 29547c478bd9Sstevel@tonic-gate p++; 29557c478bd9Sstevel@tonic-gate } 29567c478bd9Sstevel@tonic-gate 29576e4dd838Smec if (!enough) { 29587c478bd9Sstevel@tonic-gate page_reclaim_nomem: 29597c478bd9Sstevel@tonic-gate /* 29607c478bd9Sstevel@tonic-gate * We really can't have page `pp'. 29617c478bd9Sstevel@tonic-gate * Time for the no-memory dance with 29627c478bd9Sstevel@tonic-gate * page_free(). This is just like 29637c478bd9Sstevel@tonic-gate * page_create_wait(). Plus the added 29647c478bd9Sstevel@tonic-gate * attraction of releasing whatever mutex 29657c478bd9Sstevel@tonic-gate * was passed to us in `lock' when we were called. 29667c478bd9Sstevel@tonic-gate * Page_unlock() will wake up any thread 29677c478bd9Sstevel@tonic-gate * waiting around for this page. 29687c478bd9Sstevel@tonic-gate */ 29697c478bd9Sstevel@tonic-gate if (lock) { 29707c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_reclaim_zero_locked); 29717c478bd9Sstevel@tonic-gate mutex_exit(lock); 29727c478bd9Sstevel@tonic-gate } 29737c478bd9Sstevel@tonic-gate page_unlock(pp); 29747c478bd9Sstevel@tonic-gate 29757c478bd9Sstevel@tonic-gate /* 29767c478bd9Sstevel@tonic-gate * get this before we drop all the pcf locks. 29777c478bd9Sstevel@tonic-gate */ 29787c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 29797c478bd9Sstevel@tonic-gate 29807c478bd9Sstevel@tonic-gate p = pcf; 298106fb6a36Sdv142724 for (i = 0; i < pcf_fanout; i++) { 29827c478bd9Sstevel@tonic-gate p->pcf_wait++; 29837c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 29847c478bd9Sstevel@tonic-gate p++; 29857c478bd9Sstevel@tonic-gate } 29867c478bd9Sstevel@tonic-gate 29877c478bd9Sstevel@tonic-gate freemem_wait++; 29887c478bd9Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock); 29897c478bd9Sstevel@tonic-gate freemem_wait--; 29907c478bd9Sstevel@tonic-gate 29917c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 29927c478bd9Sstevel@tonic-gate 29937c478bd9Sstevel@tonic-gate if (lock) { 29947c478bd9Sstevel@tonic-gate mutex_enter(lock); 29957c478bd9Sstevel@tonic-gate } 29967c478bd9Sstevel@tonic-gate return (0); 29977c478bd9Sstevel@tonic-gate } 29987c478bd9Sstevel@tonic-gate 29997c478bd9Sstevel@tonic-gate /* 30007c478bd9Sstevel@tonic-gate * The pcf accounting has been done, 30017c478bd9Sstevel@tonic-gate * though none of the pcf_wait flags have been set, 30027c478bd9Sstevel@tonic-gate * drop the locks and continue on.
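 * (p still points at the bucket where the search above stopped, so
 * walking it back down to pcf[0] releases exactly the set of
 * pcf_lock mutexes that were acquired.)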
30037c478bd9Sstevel@tonic-gate */ 30047c478bd9Sstevel@tonic-gate while (p >= pcf) { 30057c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock); 30067c478bd9Sstevel@tonic-gate p--; 30077c478bd9Sstevel@tonic-gate } 30087c478bd9Sstevel@tonic-gate } 30097c478bd9Sstevel@tonic-gate 30107c478bd9Sstevel@tonic-gate 30117c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_reclaim); 30126e4dd838Smec 30136e4dd838Smec /* 30146e4dd838Smec * page_list_sub will handle the case where pp is a large page. 30156e4dd838Smec * It's possible that the page was promoted while on the freelist. 30166e4dd838Smec */ 30177c478bd9Sstevel@tonic-gate if (PP_ISAGED(pp)) { 30187c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_FREE_LIST); 30197c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 30207c478bd9Sstevel@tonic-gate "page_reclaim_free:pp %p", pp); 30217c478bd9Sstevel@tonic-gate } else { 30227c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST); 30237c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 30247c478bd9Sstevel@tonic-gate "page_reclaim_cache:pp %p", pp); 30257c478bd9Sstevel@tonic-gate } 30267c478bd9Sstevel@tonic-gate 30277c478bd9Sstevel@tonic-gate /* 30287c478bd9Sstevel@tonic-gate * clear the p_free & p_age bits since this page is no longer 30297c478bd9Sstevel@tonic-gate * on the free list. Notice that there is a brief time during 30307c478bd9Sstevel@tonic-gate * which a page is marked as free but is not on the list. 30317c478bd9Sstevel@tonic-gate * 30327c478bd9Sstevel@tonic-gate * Set the reference bit to protect against immediate pageout. 30337c478bd9Sstevel@tonic-gate */ 30347c478bd9Sstevel@tonic-gate PP_CLRFREE(pp); 30357c478bd9Sstevel@tonic-gate PP_CLRAGED(pp); 30367c478bd9Sstevel@tonic-gate page_set_props(pp, P_REF); 30377c478bd9Sstevel@tonic-gate 30387c478bd9Sstevel@tonic-gate CPU_STATS_ENTER_K(); 30397c478bd9Sstevel@tonic-gate cpup = CPU; /* get cpup now that CPU cannot change */ 30407c478bd9Sstevel@tonic-gate CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 30417c478bd9Sstevel@tonic-gate CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 30427c478bd9Sstevel@tonic-gate CPU_STATS_EXIT_K(); 30436e4dd838Smec ASSERT(pp->p_szc == 0); 30447c478bd9Sstevel@tonic-gate 30457c478bd9Sstevel@tonic-gate return (1); 30467c478bd9Sstevel@tonic-gate } 30477c478bd9Sstevel@tonic-gate 30487c478bd9Sstevel@tonic-gate /* 30497c478bd9Sstevel@tonic-gate * Destroy identity of the page and put it back on 30507c478bd9Sstevel@tonic-gate * the page free list. Assumes that the caller has 30517c478bd9Sstevel@tonic-gate * acquired the "exclusive" lock on the page.
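 *
 * A minimal caller sketch (hypothetical, not from this file):
 *
 *	if (page_trylock(pp, SE_EXCL))
 *		page_destroy(pp, 0);
 *
 * With dontfree == 0 the page, and the "exclusive" lock on it,
 * are released via page_free().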
30527c478bd9Sstevel@tonic-gate */ 30537c478bd9Sstevel@tonic-gate void 30547c478bd9Sstevel@tonic-gate page_destroy(page_t *pp, int dontfree) 30557c478bd9Sstevel@tonic-gate { 30567c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) && 30577c478bd9Sstevel@tonic-gate !page_iolock_assert(pp)) || panicstr); 305807b65a64Saguzovsk ASSERT(pp->p_slckcnt == 0 || panicstr); 30597c478bd9Sstevel@tonic-gate 30607c478bd9Sstevel@tonic-gate if (pp->p_szc != 0) { 30617c478bd9Sstevel@tonic-gate if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3062ad23a2dbSjohansen PP_ISKAS(pp)) { 30637c478bd9Sstevel@tonic-gate panic("page_destroy: anon or kernel or no vnode " 30647c478bd9Sstevel@tonic-gate "large page %p", (void *)pp); 30657c478bd9Sstevel@tonic-gate } 30667c478bd9Sstevel@tonic-gate page_demote_vp_pages(pp); 30677c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 30687c478bd9Sstevel@tonic-gate } 30697c478bd9Sstevel@tonic-gate 30707c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 30717c478bd9Sstevel@tonic-gate 30727c478bd9Sstevel@tonic-gate /* 30737c478bd9Sstevel@tonic-gate * Unload translations, if any, then hash out the 30747c478bd9Sstevel@tonic-gate * page to erase its identity. 30757c478bd9Sstevel@tonic-gate */ 30767c478bd9Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 30777c478bd9Sstevel@tonic-gate page_hashout(pp, NULL); 30787c478bd9Sstevel@tonic-gate 30797c478bd9Sstevel@tonic-gate if (!dontfree) { 30807c478bd9Sstevel@tonic-gate /* 30817c478bd9Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem. 30827c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt 30837c478bd9Sstevel@tonic-gate * and cowcnt since the page has an "exclusive" lock. 3084552507c5SGangadhar Mylapuram * We are doing a modified version of page_pp_unlock here. 30857c478bd9Sstevel@tonic-gate */ 30867c478bd9Sstevel@tonic-gate if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 30877c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock); 30887c478bd9Sstevel@tonic-gate if (pp->p_lckcnt != 0) { 30897c478bd9Sstevel@tonic-gate availrmem++; 3090552507c5SGangadhar Mylapuram pages_locked--; 30917c478bd9Sstevel@tonic-gate pp->p_lckcnt = 0; 30927c478bd9Sstevel@tonic-gate } 30937c478bd9Sstevel@tonic-gate if (pp->p_cowcnt != 0) { 30947c478bd9Sstevel@tonic-gate availrmem += pp->p_cowcnt; 3095552507c5SGangadhar Mylapuram pages_locked -= pp->p_cowcnt; 30967c478bd9Sstevel@tonic-gate pp->p_cowcnt = 0; 30977c478bd9Sstevel@tonic-gate } 30987c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock); 30997c478bd9Sstevel@tonic-gate } 31007c478bd9Sstevel@tonic-gate /* 31017c478bd9Sstevel@tonic-gate * Put the page on the "free" list. 
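 * (page_free() also drops the "exclusive" lock on the page.)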
31027c478bd9Sstevel@tonic-gate */ 31037c478bd9Sstevel@tonic-gate page_free(pp, 0); 31047c478bd9Sstevel@tonic-gate } 31057c478bd9Sstevel@tonic-gate } 31067c478bd9Sstevel@tonic-gate 31077c478bd9Sstevel@tonic-gate void 31087c478bd9Sstevel@tonic-gate page_destroy_pages(page_t *pp) 31097c478bd9Sstevel@tonic-gate { 31107c478bd9Sstevel@tonic-gate 31117c478bd9Sstevel@tonic-gate page_t *tpp, *rootpp = NULL; 31127c478bd9Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 31137c478bd9Sstevel@tonic-gate pgcnt_t i, pglcks = 0; 31147c478bd9Sstevel@tonic-gate uint_t szc = pp->p_szc; 31157c478bd9Sstevel@tonic-gate 31167c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 31177c478bd9Sstevel@tonic-gate 31187c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_destroy_pages); 31197c478bd9Sstevel@tonic-gate 31207c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 31217c478bd9Sstevel@tonic-gate 31227c478bd9Sstevel@tonic-gate if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 31237c478bd9Sstevel@tonic-gate panic("page_destroy_pages: not root page %p", (void *)pp); 31247c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 31257c478bd9Sstevel@tonic-gate } 31267c478bd9Sstevel@tonic-gate 3127affbd3ccSkchow for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 31287c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) && 31297c478bd9Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr); 313007b65a64Saguzovsk ASSERT(tpp->p_slckcnt == 0 || panicstr); 31317c478bd9Sstevel@tonic-gate (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 31327c478bd9Sstevel@tonic-gate page_hashout(tpp, NULL); 31337c478bd9Sstevel@tonic-gate ASSERT(tpp->p_offset == (u_offset_t)-1); 31347c478bd9Sstevel@tonic-gate if (tpp->p_lckcnt != 0) { 31357c478bd9Sstevel@tonic-gate pglcks++; 31367c478bd9Sstevel@tonic-gate tpp->p_lckcnt = 0; 31377c478bd9Sstevel@tonic-gate } else if (tpp->p_cowcnt != 0) { 31387c478bd9Sstevel@tonic-gate pglcks += tpp->p_cowcnt; 31397c478bd9Sstevel@tonic-gate tpp->p_cowcnt = 0; 31407c478bd9Sstevel@tonic-gate } 31417c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(tpp)); 31427c478bd9Sstevel@tonic-gate ASSERT(tpp->p_vnode == NULL); 31437c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 31447c478bd9Sstevel@tonic-gate 31457c478bd9Sstevel@tonic-gate PP_SETFREE(tpp); 31469d0d62adSJason Beloro page_clr_all_props(tpp); 31477c478bd9Sstevel@tonic-gate PP_SETAGED(tpp); 31487c478bd9Sstevel@tonic-gate ASSERT(tpp->p_next == tpp); 31497c478bd9Sstevel@tonic-gate ASSERT(tpp->p_prev == tpp); 31507c478bd9Sstevel@tonic-gate page_list_concat(&rootpp, &tpp); 31517c478bd9Sstevel@tonic-gate } 31527c478bd9Sstevel@tonic-gate 31537c478bd9Sstevel@tonic-gate ASSERT(rootpp == pp); 31547c478bd9Sstevel@tonic-gate if (pglcks != 0) { 31557c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock); 31567c478bd9Sstevel@tonic-gate availrmem += pglcks; 31577c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock); 31587c478bd9Sstevel@tonic-gate } 31597c478bd9Sstevel@tonic-gate 31607c478bd9Sstevel@tonic-gate page_list_add_pages(rootpp, 0); 31617c478bd9Sstevel@tonic-gate page_create_putback(pgcnt); 31627c478bd9Sstevel@tonic-gate } 31637c478bd9Sstevel@tonic-gate 31647c478bd9Sstevel@tonic-gate /* 31657c478bd9Sstevel@tonic-gate * Similar to page_destroy(), but destroys pages which are 31667c478bd9Sstevel@tonic-gate * locked and known to be on the page free list. Since 31677c478bd9Sstevel@tonic-gate * the page is known to be free and locked, no one can access 31687c478bd9Sstevel@tonic-gate * it. 
31697c478bd9Sstevel@tonic-gate * 31707c478bd9Sstevel@tonic-gate * Also, the number of free pages does not change. 31717c478bd9Sstevel@tonic-gate */ 31727c478bd9Sstevel@tonic-gate void 31737c478bd9Sstevel@tonic-gate page_destroy_free(page_t *pp) 31747c478bd9Sstevel@tonic-gate { 31757c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 31767c478bd9Sstevel@tonic-gate ASSERT(PP_ISFREE(pp)); 31777c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode); 31787c478bd9Sstevel@tonic-gate ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 31797c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp)); 31807c478bd9Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0); 31817c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 31827c478bd9Sstevel@tonic-gate 31837c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_destroy_free); 31847c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST); 31857c478bd9Sstevel@tonic-gate 31867c478bd9Sstevel@tonic-gate page_hashout(pp, NULL); 31877c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 31887c478bd9Sstevel@tonic-gate ASSERT(pp->p_offset == (u_offset_t)-1); 31897c478bd9Sstevel@tonic-gate ASSERT(pp->p_hash == NULL); 31907c478bd9Sstevel@tonic-gate 31917c478bd9Sstevel@tonic-gate PP_SETAGED(pp); 31927c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 31937c478bd9Sstevel@tonic-gate page_unlock(pp); 31947c478bd9Sstevel@tonic-gate 31957c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock); 31967c478bd9Sstevel@tonic-gate if (freemem_wait) { 31977c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv); 31987c478bd9Sstevel@tonic-gate } 31997c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock); 32007c478bd9Sstevel@tonic-gate } 32017c478bd9Sstevel@tonic-gate 32027c478bd9Sstevel@tonic-gate /* 32037c478bd9Sstevel@tonic-gate * Rename the page "opp" to have an identity specified 32047c478bd9Sstevel@tonic-gate * by [vp, off]. If a page already exists with this name 32057c478bd9Sstevel@tonic-gate * it is locked and destroyed. Note that the page's 32067c478bd9Sstevel@tonic-gate * translations are not unloaded during the rename. 32077c478bd9Sstevel@tonic-gate * 32087c478bd9Sstevel@tonic-gate * This routine is used by the anon layer to "steal" the 32097c478bd9Sstevel@tonic-gate * original page and is not unlike destroying a page and 32107c478bd9Sstevel@tonic-gate * creating a new page using the same page frame. 32117c478bd9Sstevel@tonic-gate * 32127c478bd9Sstevel@tonic-gate * XXX -- Could deadlock if caller 1 tries to rename A to B while 32137c478bd9Sstevel@tonic-gate * caller 2 tries to rename B to A. 
 */
void
page_rename(page_t *opp, vnode_t *vp, u_offset_t off)
{
        page_t          *pp;
        int             olckcnt = 0;
        int             ocowcnt = 0;
        kmutex_t        *phm;
        ulong_t         index;

        ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
        ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
        ASSERT(PP_ISFREE(opp) == 0);

        VM_STAT_ADD(page_rename_count);

        TRACE_3(TR_FAC_VM, TR_PAGE_RENAME,
            "page rename:pp %p vp %p off %llx", opp, vp, off);

        /*
         * CacheFS may call page_rename for a large NFS page
         * when both CacheFS and NFS mount points are used
         * by applications.  Demote this large page before
         * renaming it, to ensure that there are no "partial"
         * large pages left lying around.
         */
        if (opp->p_szc != 0) {
                vnode_t *ovp = opp->p_vnode;
                ASSERT(ovp != NULL);
                ASSERT(!IS_SWAPFSVP(ovp));
                ASSERT(!VN_ISKAS(ovp));
                page_demote_vp_pages(opp);
                ASSERT(opp->p_szc == 0);
        }

        page_hashout(opp, NULL);
        PP_CLRAGED(opp);

        /*
         * Acquire the appropriate page hash lock, since
         * we're going to rename the page.
         */
        index = PAGE_HASH_FUNC(vp, off);
        phm = PAGE_HASH_MUTEX(index);
        mutex_enter(phm);
top:
        /*
         * Look for an existing page with this name and destroy it if found.
         * By holding the page hash lock all the way to the page_hashin()
         * call, we are assured that no page can be created with this
         * identity.  In the case when the phm lock is dropped to undo any
         * hat layer mappings, the existing page is held with an "exclusive"
         * lock, again preventing another page from being created with
         * this identity.
         */
        pp = page_hash_search(index, vp, off);
        if (pp != NULL) {
                VM_STAT_ADD(page_rename_exists);

                /*
                 * As it turns out, this is one of only two places where
                 * page_lock() needs to hold the passed in lock in the
                 * successful case.  In all of the others, the lock could
                 * be dropped as soon as the attempt is made to lock
                 * the page.  It is tempting to add yet another argument,
                 * PL_KEEP or PL_DROP, to let page_lock know what to do.
                 */
                if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
                        /*
                         * Went to sleep because the page could not
                         * be locked.  We were woken up when the page
                         * was unlocked, or when the page was destroyed.
                         * In either case, `phm' was dropped while we
                         * slept.  Hence we should not just roar through
                         * this loop.
                         */
                        goto top;
                }

                /*
                 * If an existing page is a large page, then demote
                 * it to ensure that no "partial" large pages are
                 * "created" after page_rename.  An existing page
                 * can be a CacheFS page, and can't belong to swapfs.
                 */
                if (hat_page_is_mapped(pp)) {
                        /*
                         * Unload translations.  Since we hold the
                         * exclusive lock on this page, the page
                         * can not be changed while we drop phm.
                         * This is also not a lock protocol violation,
                         * but rather the proper way to do things.
                         */
                        mutex_exit(phm);
                        (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
                        if (pp->p_szc != 0) {
                                ASSERT(!IS_SWAPFSVP(vp));
                                ASSERT(!VN_ISKAS(vp));
                                page_demote_vp_pages(pp);
                                ASSERT(pp->p_szc == 0);
                        }
                        mutex_enter(phm);
                } else if (pp->p_szc != 0) {
                        ASSERT(!IS_SWAPFSVP(vp));
                        ASSERT(!VN_ISKAS(vp));
                        mutex_exit(phm);
                        page_demote_vp_pages(pp);
                        ASSERT(pp->p_szc == 0);
                        mutex_enter(phm);
                }
                page_hashout(pp, phm);
        }
        /*
         * Hash in the page with the new identity.
         */
        if (!page_hashin(opp, vp, off, phm)) {
                /*
                 * We were holding phm while we searched for [vp, off]
                 * and only dropped phm if we found and locked a page.
                 * If we can't create this page now, then something
                 * is really broken.
                 */
                panic("page_rename: Can't hash in page: %p", (void *)pp);
                /*NOTREACHED*/
        }

        ASSERT(MUTEX_HELD(phm));
        mutex_exit(phm);

        /*
         * Now that we have dropped phm, let's get around to finishing up
         * with pp.
         */
        if (pp != NULL) {
                ASSERT(!hat_page_is_mapped(pp));
                /* for now large pages should not end up here */
                ASSERT(pp->p_szc == 0);
                /*
                 * Save the locks for transfer to the new page and then
                 * clear them so page_free doesn't think they're important.
                 * The page_struct_lock need not be acquired for lckcnt and
                 * cowcnt since the page has an "exclusive" lock.
                 */
                olckcnt = pp->p_lckcnt;
                ocowcnt = pp->p_cowcnt;
                pp->p_lckcnt = pp->p_cowcnt = 0;

                /*
                 * Put the page on the "free" list after we drop
                 * the lock.  The less work under the lock the better.
                 */
                /*LINTED: constant in conditional context*/
                VN_DISPOSE(pp, B_FREE, 0, kcred);
        }

        /*
         * Transfer the lock count from the old page (if any).
         * The page_struct_lock need not be acquired for lckcnt and
         * cowcnt since the page has an "exclusive" lock.
         */
        opp->p_lckcnt += olckcnt;
        opp->p_cowcnt += ocowcnt;
}

/*
 * Low-level routine to add page `pp' to the hash and vp chains for
 * [vp, offset].
 *
 * Pages are normally inserted at the start of a vnode's v_pages list.
 * If the vnode is VMODSORT and the page is modified, it goes at the end.
 * This can happen when a modified page is relocated for DR.
 *
 * Returns 1 on success and 0 on failure.
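 *
 * A note on the tail insertion done below (explanatory only, not new
 * behavior): v_pages is a circular list, so linking the page in via
 * &vp->v_pages->p_vpprev->p_vpnext, without updating v_pages itself,
 * places it immediately in front of the head pointer, which is the
 * tail of the list.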
 */
static int
page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
{
        page_t  **listp;
        page_t  *tp;
        ulong_t index;

        ASSERT(PAGE_EXCL(pp));
        ASSERT(vp != NULL);
        ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));

        /*
         * Be sure to set these up before the page is inserted on the hash
         * list.  As soon as the page is placed on the list some other
         * thread might get confused and wonder how this page could
         * possibly hash to this list.
         */
        pp->p_vnode = vp;
        pp->p_offset = offset;

        /*
         * record if this page is on a swap vnode
         */
        if ((vp->v_flag & VISSWAP) != 0)
                PP_SETSWAP(pp);

        index = PAGE_HASH_FUNC(vp, offset);
        ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index)));
        listp = &page_hash[index];

        /*
         * If this page is already hashed in, fail this attempt to add it.
         */
        for (tp = *listp; tp != NULL; tp = tp->p_hash) {
                if (tp->p_vnode == vp && tp->p_offset == offset) {
                        pp->p_vnode = NULL;
                        pp->p_offset = (u_offset_t)(-1);
                        return (0);
                }
        }
        pp->p_hash = *listp;
        *listp = pp;

        /*
         * Add the page to the vnode's list of pages
         */
        if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
                listp = &vp->v_pages->p_vpprev->p_vpnext;
        else
                listp = &vp->v_pages;

        page_vpadd(listp, pp);

        return (1);
}

/*
 * Add page `pp' to both the hash and vp chains for [vp, offset].
 *
 * Returns 1 on success and 0 on failure.
 * If hold is passed in, it is not dropped.
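 *
 * A minimal calling sketch, assuming the caller holds pp SE_EXCL and pp
 * has no current identity (names are illustrative, error path elided):
 *
 *	if (!page_hashin(pp, vp, off, NULL)) {
 *		(a page named [vp, off] already exists; back out)
 *	}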
 */
int
page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
{
        kmutex_t        *phm = NULL;
        kmutex_t        *vphm;
        int             rc;

        ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
        ASSERT(pp->p_fsdata == 0 || panicstr);

        TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN,
            "page_hashin:pp %p vp %p offset %llx",
            pp, vp, offset);

        VM_STAT_ADD(hashin_count);

        if (hold != NULL)
                phm = hold;
        else {
                VM_STAT_ADD(hashin_not_held);
                phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset));
                mutex_enter(phm);
        }

        vphm = page_vnode_mutex(vp);
        mutex_enter(vphm);
        rc = page_do_hashin(pp, vp, offset);
        mutex_exit(vphm);
        if (hold == NULL)
                mutex_exit(phm);
        if (rc == 0)
                VM_STAT_ADD(hashin_already);
        return (rc);
}

/*
 * Remove page ``pp'' from the hash and vp chains and remove vp association.
 * Both the page hash mutex and the vnode's page mutex must be held by
 * the caller.
 */
static void
page_do_hashout(page_t *pp)
{
        page_t  **hpp;
        page_t  *hp;
        vnode_t *vp = pp->p_vnode;

        ASSERT(vp != NULL);
        ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));

        /*
         * First, take pp off of its hash chain.
         */
        hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];

        for (;;) {
                hp = *hpp;
                if (hp == pp)
                        break;
                if (hp == NULL) {
                        panic("page_do_hashout");
                        /*NOTREACHED*/
                }
                hpp = &hp->p_hash;
        }
        *hpp = pp->p_hash;

        /*
         * Now remove it from its associated vnode.
         */
        if (vp->v_pages)
                page_vpsub(&vp->v_pages, pp);

        pp->p_hash = NULL;
        page_clr_all_props(pp);
        PP_CLRSWAP(pp);
        pp->p_vnode = NULL;
        pp->p_offset = (u_offset_t)-1;
        pp->p_fsdata = 0;
}

/*
 * Remove page ``pp'' from the hash and vp chains and remove vp association.
 *
 * When `phm' is non-NULL it contains the address of the mutex protecting the
 * hash list pp is on.  It is not dropped.
 */
void
page_hashout(page_t *pp, kmutex_t *phm)
{
        vnode_t         *vp;
        ulong_t         index;
        kmutex_t        *nphm;
        kmutex_t        *vphm;
        kmutex_t        *sep;

        ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1);
        ASSERT(pp->p_vnode != NULL);
        ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
        ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));

        vp = pp->p_vnode;

        TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT,
            "page_hashout:pp %p vp %p", pp, vp);

        /* Kernel probe */
        TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */,
            tnf_opaque, vnode, vp,
            tnf_offset, offset, pp->p_offset);

        VM_STAT_ADD(hashout_count);
        index = PAGE_HASH_FUNC(vp, pp->p_offset);
        if (phm == NULL) {
                VM_STAT_ADD(hashout_not_held);
                nphm = PAGE_HASH_MUTEX(index);
                mutex_enter(nphm);
        }
        ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1);

        /*
         * Grab the page vnode mutex and remove the page...
         */
        vphm = page_vnode_mutex(vp);
        mutex_enter(vphm);

        page_do_hashout(pp);

        mutex_exit(vphm);
        if (phm == NULL)
                mutex_exit(nphm);

        /*
         * Wake up processes waiting for this page.  The page's
         * identity has been changed, and is probably not the
         * desired page any longer.
         */
        sep = page_se_mutex(pp);
        mutex_enter(sep);
        pp->p_selock &= ~SE_EWANTED;
        if (CV_HAS_WAITERS(&pp->p_cv))
                cv_broadcast(&pp->p_cv);
        mutex_exit(sep);
}

/*
 * Add the page to the front of a linked list of pages
 * using the p_next & p_prev pointers for the list.
 * The caller is responsible for protecting the list pointers.
 */
void
page_add(page_t **ppp, page_t *pp)
{
        ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));

        page_add_common(ppp, pp);
}

/*
 * Common code for page_add() and mach_page_add()
 */
void
page_add_common(page_t **ppp, page_t *pp)
{
        if (*ppp == NULL) {
                pp->p_next = pp->p_prev = pp;
        } else {
                pp->p_next = *ppp;
                pp->p_prev = (*ppp)->p_prev;
                (*ppp)->p_prev = pp;
                pp->p_prev->p_next = pp;
        }
        *ppp = pp;
}

/*
 * Remove this page from a linked list of pages
 * using the p_next & p_prev pointers for the list.
 *
 * The caller is responsible for protecting the list pointers.
 */
void
page_sub(page_t **ppp, page_t *pp)
{
        ASSERT((PP_ISFREE(pp)) ? 1 :
            (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));

        if (*ppp == NULL || pp == NULL) {
                panic("page_sub: bad arg(s): pp %p, *ppp %p",
                    (void *)pp, (void *)(*ppp));
                /*NOTREACHED*/
        }

        page_sub_common(ppp, pp);
}

/*
 * Common code for page_sub() and mach_page_sub()
 */
void
page_sub_common(page_t **ppp, page_t *pp)
{
        if (*ppp == pp)
                *ppp = pp->p_next;              /* go to next page */

        if (*ppp == pp)
                *ppp = NULL;                    /* page list is gone */
        else {
                pp->p_prev->p_next = pp->p_next;
                pp->p_next->p_prev = pp->p_prev;
        }
        pp->p_prev = pp->p_next = pp;           /* make pp a list of one */
}

/*
 * Break page list oppp into two lists with npages in the first list.
 * The tail is returned in nppp.
 */
void
page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
{
        page_t *s1pp = *oppp;
        page_t *s2pp;
        page_t *e1pp, *e2pp;
        long n = 0;

        if (s1pp == NULL) {
                *nppp = NULL;
                return;
        }
        if (npages == 0) {
                *nppp = s1pp;
                *oppp = NULL;
                return;
        }
        for (n = 0, s2pp = *oppp; n < npages; n++) {
                s2pp = s2pp->p_next;
        }
        /* Fix head and tail of new lists */
        e1pp = s2pp->p_prev;
        e2pp = s1pp->p_prev;
        s1pp->p_prev = e1pp;
        e1pp->p_next = s1pp;
        s2pp->p_prev = e2pp;
        e2pp->p_next = s2pp;

        /* second list empty */
        if (s2pp == s1pp) {
                *oppp = s1pp;
                *nppp = NULL;
        } else {
                *oppp = s1pp;
                *nppp = s2pp;
        }
}

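/*
 * A round-trip sketch of page_list_break() above and page_list_concat()
 * below (illustrative only; "plist" stands for any circular p_next/p_prev
 * list whose pointers the caller already protects):
 *
 *	page_t *tail;
 *
 *	page_list_break(&plist, &tail, npages);
 *	(work on the first npages pages, now isolated in plist)
 *	page_list_concat(&plist, &tail);
 */
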
/*
 * Concatenate page list nppp onto the end of list ppp.
 */
void
page_list_concat(page_t **ppp, page_t **nppp)
{
        page_t *s1pp, *s2pp, *e1pp, *e2pp;

        if (*nppp == NULL) {
                return;
        }
        if (*ppp == NULL) {
                *ppp = *nppp;
                return;
        }
        s1pp = *ppp;
        e1pp = s1pp->p_prev;
        s2pp = *nppp;
        e2pp = s2pp->p_prev;
        s1pp->p_prev = e2pp;
        e2pp->p_next = s1pp;
        e1pp->p_next = s2pp;
        s2pp->p_prev = e1pp;
}

/*
 * return the next page in the page list
 */
page_t *
page_list_next(page_t *pp)
{
        return (pp->p_next);
}

/*
 * Add the page to the front of the linked list of pages
 * using p_vpnext/p_vpprev pointers for the list.
 *
 * The caller is responsible for protecting the lists.
 */
void
page_vpadd(page_t **ppp, page_t *pp)
{
        if (*ppp == NULL) {
                pp->p_vpnext = pp->p_vpprev = pp;
        } else {
                pp->p_vpnext = *ppp;
                pp->p_vpprev = (*ppp)->p_vpprev;
                (*ppp)->p_vpprev = pp;
                pp->p_vpprev->p_vpnext = pp;
        }
        *ppp = pp;
}

/*
 * Remove this page from the linked list of pages
 * using p_vpnext/p_vpprev pointers for the list.
 *
 * The caller is responsible for protecting the lists.
 */
void
page_vpsub(page_t **ppp, page_t *pp)
{
        if (*ppp == NULL || pp == NULL) {
                panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
                    (void *)pp, (void *)(*ppp));
                /*NOTREACHED*/
        }

        if (*ppp == pp)
                *ppp = pp->p_vpnext;            /* go to next page */

        if (*ppp == pp)
                *ppp = NULL;                    /* page list is gone */
        else {
                pp->p_vpprev->p_vpnext = pp->p_vpnext;
                pp->p_vpnext->p_vpprev = pp->p_vpprev;
        }
        pp->p_vpprev = pp->p_vpnext = pp;       /* make pp a list of one */
}

/*
 * Lock a physical page into memory "long term".  Used to support "lock
 * in memory" functions.  Accepts the page to be locked, and a cow variable
 * to indicate whether the lock will travel to the new page during
 * a potential copy-on-write.
 */
int
page_pp_lock(
        page_t *pp,                     /* page to be locked */
        int cow,                        /* cow lock */
        int kernel)                     /* must succeed -- ignore checking */
{
        int r = 0;                      /* result -- assume failure */

        ASSERT(PAGE_LOCKED(pp));

        page_struct_lock(pp);
        /*
         * Acquire the "freemem_lock" for availrmem.
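         *
         * A worked example of the accounting below (numbers assumed for
         * illustration only): with availrmem = 1000 and pages_pp_maximum =
         * 100, the first non-kernel lock on an unlocked page drops
         * availrmem to 999, bumps pages_locked, and sets p_lckcnt to 1;
         * locking the same page again only advances p_lckcnt to 2.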
         */
        if (cow) {
                mutex_enter(&freemem_lock);
                if ((availrmem > pages_pp_maximum) &&
                    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
                        availrmem--;
                        pages_locked++;
                        mutex_exit(&freemem_lock);
                        r = 1;
                        if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
                                cmn_err(CE_WARN,
                                    "COW lock limit reached on pfn 0x%lx",
                                    page_pptonum(pp));
                        }
                } else
                        mutex_exit(&freemem_lock);
        } else {
                if (pp->p_lckcnt) {
                        if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
                                r = 1;
                                if (++pp->p_lckcnt ==
                                    (ushort_t)PAGE_LOCK_MAXIMUM) {
                                        cmn_err(CE_WARN, "Page lock limit "
                                            "reached on pfn 0x%lx",
                                            page_pptonum(pp));
                                }
                        }
                } else {
                        if (kernel) {
                                /* availrmem accounting done by caller */
                                ++pp->p_lckcnt;
                                r = 1;
                        } else {
                                mutex_enter(&freemem_lock);
                                if (availrmem > pages_pp_maximum) {
                                        availrmem--;
                                        pages_locked++;
                                        ++pp->p_lckcnt;
                                        r = 1;
                                }
                                mutex_exit(&freemem_lock);
                        }
                }
        }
        page_struct_unlock(pp);
        return (r);
}

/*
 * Decommit a lock on a physical page frame.  Account for cow locks if
 * appropriate.
 */
void
page_pp_unlock(
        page_t *pp,                     /* page to be unlocked */
        int cow,                        /* expect cow lock */
        int kernel)                     /* this was a kernel lock */
{
        ASSERT(PAGE_LOCKED(pp));

        page_struct_lock(pp);
        /*
         * Acquire the "freemem_lock" for availrmem.
         * If cowcnt or lckcnt is already 0 do nothing; i.e., we
         * could be called to unlock even if nothing is locked.  This could
         * happen if locked file pages were truncated (removing the lock)
         * and the file was grown again and new pages faulted in; the new
         * pages are unlocked but the segment still thinks they're locked.
         */
        if (cow) {
                if (pp->p_cowcnt) {
                        mutex_enter(&freemem_lock);
                        pp->p_cowcnt--;
                        availrmem++;
                        pages_locked--;
                        mutex_exit(&freemem_lock);
                }
        } else {
                if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
                        if (!kernel) {
                                mutex_enter(&freemem_lock);
                                availrmem++;
                                pages_locked--;
                                mutex_exit(&freemem_lock);
                        }
                }
        }
        page_struct_unlock(pp);
}

/*
 * This routine reserves availrmem for npages.
 * flags: KM_NOSLEEP or KM_SLEEP
 * returns 1 on success or 0 on failure
 */
int
page_resv(pgcnt_t npages, uint_t flags)
{
        mutex_enter(&freemem_lock);
        while (availrmem < tune.t_minarmem + npages) {
                if (flags & KM_NOSLEEP) {
                        mutex_exit(&freemem_lock);
                        return (0);
                }
                mutex_exit(&freemem_lock);
                page_needfree(npages);
                kmem_reap();
                delay(hz >> 2);
                page_needfree(-(spgcnt_t)npages);
                mutex_enter(&freemem_lock);
        }
        availrmem -= npages;
        mutex_exit(&freemem_lock);
        return (1);
}

/*
 * This routine unreserves availrmem for npages.
 */
void
page_unresv(pgcnt_t npages)
{
        mutex_enter(&freemem_lock);
        availrmem += npages;
        mutex_exit(&freemem_lock);
}

/*
 * See Statement at the beginning of segvn_lockop() regarding
 * the way we handle cowcnts and lckcnts.
 *
 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
 * that breaks COW has PROT_WRITE.
 *
 * Note that we may also break COW in case we are softlocking
 * on read access during physio;
 * in this softlock case, the vpage may not have PROT_WRITE.
 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
 * if the vpage doesn't have PROT_WRITE.
 *
 * This routine is never called if we are stealing a page
 * in anon_private.
 *
 * The caller subtracted from availrmem for a read-only mapping;
 * if lckcnt is 1, increment availrmem.
 */
void
page_pp_useclaim(
        page_t *opp,                    /* original page frame losing lock */
        page_t *npp,                    /* new page frame gaining lock */
        uint_t write_perm)              /* set if vpage has PROT_WRITE */
{
        int payback = 0;
        int nidx, oidx;

        ASSERT(PAGE_LOCKED(opp));
        ASSERT(PAGE_LOCKED(npp));

        /*
         * Since we have two pages we probably have two locks.  We need to
         * take them in a defined order to avoid deadlocks.  It's also
         * possible they both hash to the same lock, in which case this is
         * a non-issue.
         */
        nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp));
        oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp));
        if (nidx < oidx) {
                page_struct_lock(npp);
                page_struct_lock(opp);
        } else if (oidx < nidx) {
                page_struct_lock(opp);
                page_struct_lock(npp);
        } else {        /* The pages hash to the same lock */
                page_struct_lock(npp);
        }

        ASSERT(npp->p_cowcnt == 0);
        ASSERT(npp->p_lckcnt == 0);

        /* Don't use claim if nothing is locked (see page_pp_unlock above) */
        if ((write_perm && opp->p_cowcnt != 0) ||
            (!write_perm && opp->p_lckcnt != 0)) {

                if (write_perm) {
                        npp->p_cowcnt++;
                        ASSERT(opp->p_cowcnt != 0);
                        opp->p_cowcnt--;
                } else {

                        ASSERT(opp->p_lckcnt != 0);

                        /*
                         * We didn't need availrmem decremented if p_lckcnt on
                         * original page is 1.  Here, we are unlocking
                         * read-only copy belonging to original page and
                         * are locking a copy belonging to new page.
                         */
                        if (opp->p_lckcnt == 1)
                                payback = 1;

                        npp->p_lckcnt++;
                        opp->p_lckcnt--;
                }
        }
        if (payback) {
                mutex_enter(&freemem_lock);
                availrmem++;
                pages_useclaim--;
                mutex_exit(&freemem_lock);
        }

        if (nidx < oidx) {
                page_struct_unlock(opp);
                page_struct_unlock(npp);
        } else if (oidx < nidx) {
                page_struct_unlock(npp);
                page_struct_unlock(opp);
        } else {        /* The pages hash to the same lock */
                page_struct_unlock(npp);
        }
}

/*
 * Simple claim adjust functions -- used to support changes in
 * claims due to changes in access permissions.  Used by segvn_setprot().
 */
int
page_addclaim(page_t *pp)
{
        int r = 0;                      /* result */

        ASSERT(PAGE_LOCKED(pp));

        page_struct_lock(pp);
        ASSERT(pp->p_lckcnt != 0);

        if (pp->p_lckcnt == 1) {
                if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
                        --pp->p_lckcnt;
                        r = 1;
                        if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
                                cmn_err(CE_WARN,
                                    "COW lock limit reached on pfn 0x%lx",
                                    page_pptonum(pp));
                        }
                }
        } else {
                mutex_enter(&freemem_lock);
                if ((availrmem > pages_pp_maximum) &&
                    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
                        --availrmem;
                        ++pages_claimed;
                        mutex_exit(&freemem_lock);
                        --pp->p_lckcnt;
                        r = 1;
                        if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
                                cmn_err(CE_WARN,
                                    "COW lock limit reached on pfn 0x%lx",
                                    page_pptonum(pp));
                        }
                } else
                        mutex_exit(&freemem_lock);
        }
        page_struct_unlock(pp);
        return (r);
}

int
page_subclaim(page_t *pp)
{
        int r = 0;

        ASSERT(PAGE_LOCKED(pp));

        page_struct_lock(pp);
        ASSERT(pp->p_cowcnt != 0);

        if (pp->p_lckcnt) {
                if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
                        r = 1;
                        /*
                         * for availrmem
                         */
                        mutex_enter(&freemem_lock);
                        availrmem++;
                        pages_claimed--;
                        mutex_exit(&freemem_lock);

                        pp->p_cowcnt--;

                        if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
                                cmn_err(CE_WARN,
                                    "Page lock limit reached on pfn 0x%lx",
                                    page_pptonum(pp));
                        }
                }
        } else {
                r = 1;
                pp->p_cowcnt--;
                pp->p_lckcnt++;
        }
        page_struct_unlock(pp);
        return (r);
}

/*
 * Variant of page_addclaim(), where ppa[] contains the pages of a single
 * large page.
 */
int
page_addclaim_pages(page_t **ppa)
{
        pgcnt_t lckpgs = 0, pg_idx;

        VM_STAT_ADD(pagecnt.pc_addclaim_pages);

        /*
         * Only need to take the page struct lock on the large page root.
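         *
         * (A note on why the root lock suffices, to the best of our
         * reading: page_struct_lock() hashes on PP_PAGEROOT(), as the
         * lock-ordering code in page_pp_useclaim() above also assumes,
         * so every constituent page of the large page maps to the same
         * mutex as ppa[0].)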
         */
        page_struct_lock(ppa[0]);
        for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {

                ASSERT(PAGE_LOCKED(ppa[pg_idx]));
                ASSERT(ppa[pg_idx]->p_lckcnt != 0);
                if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
                        page_struct_unlock(ppa[0]);
                        return (0);
                }
                if (ppa[pg_idx]->p_lckcnt > 1)
                        lckpgs++;
        }

        if (lckpgs != 0) {
                mutex_enter(&freemem_lock);
                if (availrmem >= pages_pp_maximum + lckpgs) {
                        availrmem -= lckpgs;
                        pages_claimed += lckpgs;
                } else {
                        mutex_exit(&freemem_lock);
                        page_struct_unlock(ppa[0]);
                        return (0);
                }
                mutex_exit(&freemem_lock);
        }

        for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
                ppa[pg_idx]->p_lckcnt--;
                ppa[pg_idx]->p_cowcnt++;
        }
        page_struct_unlock(ppa[0]);
        return (1);
}

/*
 * Variant of page_subclaim(), where ppa[] contains the pages of a single
 * large page.
 */
int
page_subclaim_pages(page_t **ppa)
{
        pgcnt_t ulckpgs = 0, pg_idx;

        VM_STAT_ADD(pagecnt.pc_subclaim_pages);

        /*
         * Only need to take the page struct lock on the large page root.
         */
        page_struct_lock(ppa[0]);
        for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {

                ASSERT(PAGE_LOCKED(ppa[pg_idx]));
                ASSERT(ppa[pg_idx]->p_cowcnt != 0);
                if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
                        page_struct_unlock(ppa[0]);
                        return (0);
                }
                if (ppa[pg_idx]->p_lckcnt != 0)
                        ulckpgs++;
        }

        if (ulckpgs != 0) {
                mutex_enter(&freemem_lock);
                availrmem += ulckpgs;
                pages_claimed -= ulckpgs;
                mutex_exit(&freemem_lock);
        }

        for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
                ppa[pg_idx]->p_cowcnt--;
                ppa[pg_idx]->p_lckcnt++;
        }
        page_struct_unlock(ppa[0]);
        return (1);
}

page_t *
page_numtopp(pfn_t pfnum, se_t se)
{
        page_t *pp;

retry:
        pp = page_numtopp_nolock(pfnum);
        if (pp == NULL) {
                return ((page_t *)NULL);
        }

        /*
         * Acquire the appropriate lock on the page.
         */
        while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) {
                if (page_pptonum(pp) != pfnum)
                        goto retry;
                continue;
        }

        if (page_pptonum(pp) != pfnum) {
                page_unlock(pp);
                goto retry;
        }

        return (pp);
}

page_t *
page_numtopp_noreclaim(pfn_t pfnum, se_t se)
{
        page_t *pp;

retry:
        pp = page_numtopp_nolock(pfnum);
        if (pp == NULL) {
                return ((page_t *)NULL);
        }

        /*
         * Acquire the appropriate lock on the page.
         */
        while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) {
                if (page_pptonum(pp) != pfnum)
                        goto retry;
                continue;
        }

        if (page_pptonum(pp) != pfnum) {
                page_unlock(pp);
                goto retry;
        }

        return (pp);
}

/*
 * This routine is like page_numtopp, but will only return page structs
 * for pages which are ok for loading into hardware using the page struct.
 */
page_t *
page_numtopp_nowait(pfn_t pfnum, se_t se)
{
        page_t *pp;

retry:
        pp = page_numtopp_nolock(pfnum);
        if (pp == NULL) {
                return ((page_t *)NULL);
        }

        /*
         * Try to acquire the appropriate lock on the page.
         */
        if (PP_ISFREE(pp))
                pp = NULL;
        else {
                if (!page_trylock(pp, se))
                        pp = NULL;
                else {
                        if (page_pptonum(pp) != pfnum) {
                                page_unlock(pp);
                                goto retry;
                        }
                        if (PP_ISFREE(pp)) {
                                page_unlock(pp);
                                pp = NULL;
                        }
                }
        }
        return (pp);
}

#define	SYNC_PROGRESS_NPAGES	1000

/*
 * Returns a count of dirty pages that are in the process
 * of being written out.  If 'cleanit' is set, try to push the page.
 */
pgcnt_t
page_busy(int cleanit)
{
        page_t *page0 = page_first();
        page_t *pp = page0;
        pgcnt_t nppbusy = 0;
        int counter = 0;
        u_offset_t off;

        do {
                vnode_t *vp = pp->p_vnode;

                /*
                 * Reset the sync timeout.  The page list is very long
                 * on large memory systems.
                 */
                if (++counter > SYNC_PROGRESS_NPAGES) {
                        counter = 0;
                        vfs_syncprogress();
                }

                /*
                 * A page is a candidate for syncing if it is:
                 *
                 * (a)	On neither the freelist nor the cachelist
                 * (b)	Hashed onto a vnode
                 * (c)	Not a kernel page
                 * (d)	Dirty
                 * (e)	Not part of a swapfile
                 * (f)	a page which belongs to a real vnode; eg has a
                 *	non-null v_vfsp pointer.
                 * (g)	Backed by a filesystem which doesn't have a
                 *	stubbed-out sync operation
                 */
                if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
                    hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
                    vfs_can_sync(vp->v_vfsp)) {
                        nppbusy++;

                        if (!cleanit)
                                continue;
                        if (!page_trylock(pp, SE_EXCL))
                                continue;

                        if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
                            pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
                            !(hat_pagesync(pp,
                            HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) &
                            P_MOD)) {
                                page_unlock(pp);
                                continue;
                        }
                        off = pp->p_offset;
                        VN_HOLD(vp);
                        page_unlock(pp);
                        (void) VOP_PUTPAGE(vp, off, PAGESIZE,
                            B_ASYNC | B_FREE, kcred, NULL);
                        VN_RELE(vp);
                }
        } while ((pp = page_next(pp)) != page0);

        vfs_syncprogress();
        return (nppbusy);
}

void page_invalidate_pages(void);

/*
 * Callback handler to the VM sub-system.
 *
 * Callers must ensure there are no recursive entries to this function.
 */
/*ARGSUSED*/
boolean_t
callb_vm_cpr(void *arg, int code)
{
        if (code == CB_CODE_CPR_CHKPT)
                page_invalidate_pages();
        return (B_TRUE);
}

/*
 * Invalidate all pages of the system.
 * It should not be called until all user page activity has stopped.
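 *
 * A sketch of the intended invocation path, mirroring callb_vm_cpr()
 * above (the registration call is illustrative of how CPR callbacks are
 * typically added, not a quote of the actual registration site):
 *
 *	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
 *	(at checkpoint time the framework calls
 *	callb_vm_cpr(arg, CB_CODE_CPR_CHKPT), which lands here)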
 */
void
page_invalidate_pages()
{
        page_t *pp;
        page_t *page0;
        pgcnt_t nbusypages;
        int retry = 0;
        const int MAXRETRIES = 4;
top:
        /*
         * Flush dirty pages and destroy the clean ones.
         */
        nbusypages = 0;

        pp = page0 = page_first();
        do {
                struct vnode *vp;
                u_offset_t offset;
                int mod;

                /*
                 * Skip the page if it has no vnode, or if it is associated
                 * with the kernel vnode or prom-allocated kernel memory.
                 */
                if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
                        continue;

                /*
                 * Skip pages that have already been freed and invalidated.
                 */
                if (PP_ISFREE(pp) && PP_ISAGED(pp))
                        continue;

                /*
                 * Skip pages that are already locked or can't be
                 * "exclusively" locked or are already free.  After we lock
                 * the page, check the free and age bits again to be sure
                 * it's not destroyed yet.
                 * To achieve max. parallelization, we use page_trylock
                 * instead of page_lock so that we don't block on individual
                 * pages while we have thousands of other pages to process.
                 */
                if (!page_trylock(pp, SE_EXCL)) {
                        nbusypages++;
                        continue;
                } else if (PP_ISFREE(pp)) {
                        if (!PP_ISAGED(pp)) {
                                page_destroy_free(pp);
                        } else {
                                page_unlock(pp);
                        }
                        continue;
                }
                /*
                 * Is this page involved in some I/O? shared?
                 *
                 * The page_struct_lock need not be acquired to
                 * examine these fields since the page has an
                 * "exclusive" lock.
44517c478bd9Sstevel@tonic-gate 		 */
44527c478bd9Sstevel@tonic-gate 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
44537c478bd9Sstevel@tonic-gate 			page_unlock(pp);
44547c478bd9Sstevel@tonic-gate 			continue;
44557c478bd9Sstevel@tonic-gate 		}
44567c478bd9Sstevel@tonic-gate 
44577c478bd9Sstevel@tonic-gate 		if (vp->v_type == VCHR) {
44587c478bd9Sstevel@tonic-gate 			panic("vp->v_type == VCHR");
44597c478bd9Sstevel@tonic-gate 			/*NOTREACHED*/
44607c478bd9Sstevel@tonic-gate 		}
44617c478bd9Sstevel@tonic-gate 
44627c478bd9Sstevel@tonic-gate 		if (!page_try_demote_pages(pp)) {
44637c478bd9Sstevel@tonic-gate 			page_unlock(pp);
44647c478bd9Sstevel@tonic-gate 			continue;
44657c478bd9Sstevel@tonic-gate 		}
44667c478bd9Sstevel@tonic-gate 
44677c478bd9Sstevel@tonic-gate 		/*
44687c478bd9Sstevel@tonic-gate 		 * Check the modified bit. Leave the bits alone in hardware
44697c478bd9Sstevel@tonic-gate 		 * (they will be modified if we do the putpage).
44707c478bd9Sstevel@tonic-gate 		 */
44717c478bd9Sstevel@tonic-gate 		mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
44727c478bd9Sstevel@tonic-gate 		    & P_MOD);
44737c478bd9Sstevel@tonic-gate 		if (mod) {
44747c478bd9Sstevel@tonic-gate 			offset = pp->p_offset;
44757c478bd9Sstevel@tonic-gate 			/*
44767c478bd9Sstevel@tonic-gate 			 * Hold the vnode before releasing the page lock
44777c478bd9Sstevel@tonic-gate 			 * to prevent it from being freed and re-used by
44787c478bd9Sstevel@tonic-gate 			 * some other thread.
44797c478bd9Sstevel@tonic-gate 			 */
44807c478bd9Sstevel@tonic-gate 			VN_HOLD(vp);
44817c478bd9Sstevel@tonic-gate 			page_unlock(pp);
44827c478bd9Sstevel@tonic-gate 			/*
44837c478bd9Sstevel@tonic-gate 			 * No error return is checked here. Callers such as
44847c478bd9Sstevel@tonic-gate 			 * cpr deal with the dirty pages at dump time
44857c478bd9Sstevel@tonic-gate 			 * if this putpage fails.
44867c478bd9Sstevel@tonic-gate 			 */
44877c478bd9Sstevel@tonic-gate 			(void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4488da6c28aaSamw 			    kcred, NULL);
44897c478bd9Sstevel@tonic-gate 			VN_RELE(vp);
44907c478bd9Sstevel@tonic-gate 		} else {
44912e0ea4c4SMichael Corcoran 			/*LINTED: constant in conditional context*/
44922e0ea4c4SMichael Corcoran 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
44937c478bd9Sstevel@tonic-gate 		}
44947c478bd9Sstevel@tonic-gate 	} while ((pp = page_next(pp)) != page0);
44957c478bd9Sstevel@tonic-gate 	if (nbusypages && retry++ < MAXRETRIES) {
44967c478bd9Sstevel@tonic-gate 		delay(1);
44977c478bd9Sstevel@tonic-gate 		goto top;
44987c478bd9Sstevel@tonic-gate 	}
44997c478bd9Sstevel@tonic-gate }
45007c478bd9Sstevel@tonic-gate 
45017c478bd9Sstevel@tonic-gate /*
45027c478bd9Sstevel@tonic-gate  * Replace the page "old" with the page "new" on the page hash and vnode lists.
45037c478bd9Sstevel@tonic-gate  *
4504da6c28aaSamw  * The replacement must be done in place, i.e. the equivalent sequence:
45057c478bd9Sstevel@tonic-gate  *
45067c478bd9Sstevel@tonic-gate  *	vp = old->p_vnode;
45077c478bd9Sstevel@tonic-gate  *	off = old->p_offset;
45087c478bd9Sstevel@tonic-gate  *	page_do_hashout(old)
45097c478bd9Sstevel@tonic-gate  *	page_do_hashin(new, vp, off)
45107c478bd9Sstevel@tonic-gate  *
45117c478bd9Sstevel@tonic-gate  * doesn't work, since
45127c478bd9Sstevel@tonic-gate  * 1) if old is the only page on the vnode, the v_pages list has a window
45137c478bd9Sstevel@tonic-gate  *    where it looks empty. This will break file system assumptions.
45147c478bd9Sstevel@tonic-gate  * and
45157c478bd9Sstevel@tonic-gate  * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
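 *
 * Instead, the swap is done in place with both list locks held, as
 * page_relocate_hash() below does; in outline (taken from that
 * function):
 *
 *	phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
 *	mutex_enter(phm);
 *	vphm = page_vnode_mutex(vp);
 *	mutex_enter(vphm);
 *	page_do_relocate_hash(new, old);
 *	mutex_exit(vphm);
 *	mutex_exit(phm);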
45167c478bd9Sstevel@tonic-gate */ 45177c478bd9Sstevel@tonic-gate static void 45187c478bd9Sstevel@tonic-gate page_do_relocate_hash(page_t *new, page_t *old) 45197c478bd9Sstevel@tonic-gate { 45207c478bd9Sstevel@tonic-gate page_t **hash_list; 45217c478bd9Sstevel@tonic-gate vnode_t *vp = old->p_vnode; 45227c478bd9Sstevel@tonic-gate kmutex_t *sep; 45237c478bd9Sstevel@tonic-gate 45247c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(old)); 45257c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(new)); 45267c478bd9Sstevel@tonic-gate ASSERT(vp != NULL); 45277c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 45287c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 45297c478bd9Sstevel@tonic-gate 45307c478bd9Sstevel@tonic-gate /* 45317c478bd9Sstevel@tonic-gate * First find old page on the page hash list 45327c478bd9Sstevel@tonic-gate */ 45337c478bd9Sstevel@tonic-gate hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 45347c478bd9Sstevel@tonic-gate 45357c478bd9Sstevel@tonic-gate for (;;) { 45367c478bd9Sstevel@tonic-gate if (*hash_list == old) 45377c478bd9Sstevel@tonic-gate break; 45387c478bd9Sstevel@tonic-gate if (*hash_list == NULL) { 45397c478bd9Sstevel@tonic-gate panic("page_do_hashout"); 45407c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 45417c478bd9Sstevel@tonic-gate } 45427c478bd9Sstevel@tonic-gate hash_list = &(*hash_list)->p_hash; 45437c478bd9Sstevel@tonic-gate } 45447c478bd9Sstevel@tonic-gate 45457c478bd9Sstevel@tonic-gate /* 45467c478bd9Sstevel@tonic-gate * update new and replace old with new on the page hash list 45477c478bd9Sstevel@tonic-gate */ 45487c478bd9Sstevel@tonic-gate new->p_vnode = old->p_vnode; 45497c478bd9Sstevel@tonic-gate new->p_offset = old->p_offset; 45507c478bd9Sstevel@tonic-gate new->p_hash = old->p_hash; 45517c478bd9Sstevel@tonic-gate *hash_list = new; 45527c478bd9Sstevel@tonic-gate 45537c478bd9Sstevel@tonic-gate if ((new->p_vnode->v_flag & VISSWAP) != 0) 45547c478bd9Sstevel@tonic-gate PP_SETSWAP(new); 45557c478bd9Sstevel@tonic-gate 45567c478bd9Sstevel@tonic-gate /* 45577c478bd9Sstevel@tonic-gate * replace old with new on the vnode's page list 45587c478bd9Sstevel@tonic-gate */ 45597c478bd9Sstevel@tonic-gate if (old->p_vpnext == old) { 45607c478bd9Sstevel@tonic-gate new->p_vpnext = new; 45617c478bd9Sstevel@tonic-gate new->p_vpprev = new; 45627c478bd9Sstevel@tonic-gate } else { 45637c478bd9Sstevel@tonic-gate new->p_vpnext = old->p_vpnext; 45647c478bd9Sstevel@tonic-gate new->p_vpprev = old->p_vpprev; 45657c478bd9Sstevel@tonic-gate new->p_vpnext->p_vpprev = new; 45667c478bd9Sstevel@tonic-gate new->p_vpprev->p_vpnext = new; 45677c478bd9Sstevel@tonic-gate } 45687c478bd9Sstevel@tonic-gate if (vp->v_pages == old) 45697c478bd9Sstevel@tonic-gate vp->v_pages = new; 45707c478bd9Sstevel@tonic-gate 45717c478bd9Sstevel@tonic-gate /* 45727c478bd9Sstevel@tonic-gate * clear out the old page 45737c478bd9Sstevel@tonic-gate */ 45747c478bd9Sstevel@tonic-gate old->p_hash = NULL; 45757c478bd9Sstevel@tonic-gate old->p_vpnext = NULL; 45767c478bd9Sstevel@tonic-gate old->p_vpprev = NULL; 45777c478bd9Sstevel@tonic-gate old->p_vnode = NULL; 45787c478bd9Sstevel@tonic-gate PP_CLRSWAP(old); 45797c478bd9Sstevel@tonic-gate old->p_offset = (u_offset_t)-1; 45809d0d62adSJason Beloro page_clr_all_props(old); 45817c478bd9Sstevel@tonic-gate 45827c478bd9Sstevel@tonic-gate /* 45837c478bd9Sstevel@tonic-gate * Wake up processes waiting for this page. 
The page's 45847c478bd9Sstevel@tonic-gate * identity has been changed, and is probably not the 45857c478bd9Sstevel@tonic-gate * desired page any longer. 45867c478bd9Sstevel@tonic-gate */ 45877c478bd9Sstevel@tonic-gate sep = page_se_mutex(old); 45887c478bd9Sstevel@tonic-gate mutex_enter(sep); 458942787a71Sstans old->p_selock &= ~SE_EWANTED; 45907c478bd9Sstevel@tonic-gate if (CV_HAS_WAITERS(&old->p_cv)) 45917c478bd9Sstevel@tonic-gate cv_broadcast(&old->p_cv); 45927c478bd9Sstevel@tonic-gate mutex_exit(sep); 45937c478bd9Sstevel@tonic-gate } 45947c478bd9Sstevel@tonic-gate 45957c478bd9Sstevel@tonic-gate /* 45967c478bd9Sstevel@tonic-gate * This function moves the identity of page "pp_old" to page "pp_new". 45977c478bd9Sstevel@tonic-gate * Both pages must be locked on entry. "pp_new" is free, has no identity, 45987c478bd9Sstevel@tonic-gate * and need not be hashed out from anywhere. 45997c478bd9Sstevel@tonic-gate */ 46007c478bd9Sstevel@tonic-gate void 46017c478bd9Sstevel@tonic-gate page_relocate_hash(page_t *pp_new, page_t *pp_old) 46027c478bd9Sstevel@tonic-gate { 46037c478bd9Sstevel@tonic-gate vnode_t *vp = pp_old->p_vnode; 46047c478bd9Sstevel@tonic-gate u_offset_t off = pp_old->p_offset; 46057c478bd9Sstevel@tonic-gate kmutex_t *phm, *vphm; 46067c478bd9Sstevel@tonic-gate 46077c478bd9Sstevel@tonic-gate /* 46087c478bd9Sstevel@tonic-gate * Rehash two pages 46097c478bd9Sstevel@tonic-gate */ 46107c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp_old)); 46117c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp_new)); 46127c478bd9Sstevel@tonic-gate ASSERT(vp != NULL); 46137c478bd9Sstevel@tonic-gate ASSERT(pp_new->p_vnode == NULL); 46147c478bd9Sstevel@tonic-gate 46157c478bd9Sstevel@tonic-gate /* 46167c478bd9Sstevel@tonic-gate * hashout then hashin while holding the mutexes 46177c478bd9Sstevel@tonic-gate */ 46187c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 46197c478bd9Sstevel@tonic-gate mutex_enter(phm); 46207c478bd9Sstevel@tonic-gate vphm = page_vnode_mutex(vp); 46217c478bd9Sstevel@tonic-gate mutex_enter(vphm); 46227c478bd9Sstevel@tonic-gate 46237c478bd9Sstevel@tonic-gate page_do_relocate_hash(pp_new, pp_old); 46247c478bd9Sstevel@tonic-gate 4625c7531c7fSPrakash Sangappa /* The following comment preserved from page_flip(). */ 4626c7531c7fSPrakash Sangappa pp_new->p_fsdata = pp_old->p_fsdata; 4627c7531c7fSPrakash Sangappa pp_old->p_fsdata = 0; 46287c478bd9Sstevel@tonic-gate mutex_exit(vphm); 46297c478bd9Sstevel@tonic-gate mutex_exit(phm); 46307c478bd9Sstevel@tonic-gate 46317c478bd9Sstevel@tonic-gate /* 46327c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and 46337c478bd9Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock. 46347c478bd9Sstevel@tonic-gate */ 46357c478bd9Sstevel@tonic-gate ASSERT(pp_new->p_lckcnt == 0); 46367c478bd9Sstevel@tonic-gate ASSERT(pp_new->p_cowcnt == 0); 46377c478bd9Sstevel@tonic-gate pp_new->p_lckcnt = pp_old->p_lckcnt; 46387c478bd9Sstevel@tonic-gate pp_new->p_cowcnt = pp_old->p_cowcnt; 46397c478bd9Sstevel@tonic-gate pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 46407c478bd9Sstevel@tonic-gate 46417c478bd9Sstevel@tonic-gate } 46427c478bd9Sstevel@tonic-gate 46437c478bd9Sstevel@tonic-gate /* 46447c478bd9Sstevel@tonic-gate * Helper routine used to lock all remaining members of a 46457c478bd9Sstevel@tonic-gate * large page. The caller is responsible for passing in a locked 46467c478bd9Sstevel@tonic-gate * pp. 
If pp is a large page, then it succeeds in locking all the 46477c478bd9Sstevel@tonic-gate * remaining constituent pages or it returns with only the 46487c478bd9Sstevel@tonic-gate * original page locked. 46497c478bd9Sstevel@tonic-gate * 46507c478bd9Sstevel@tonic-gate * Returns 1 on success, 0 on failure. 46517c478bd9Sstevel@tonic-gate * 4652da6c28aaSamw * If success is returned this routine guarantees p_szc for all constituent 46537c478bd9Sstevel@tonic-gate * pages of a large page pp belongs to can't change. To achieve this we 46547c478bd9Sstevel@tonic-gate * recheck szc of pp after locking all constituent pages and retry if szc 46557c478bd9Sstevel@tonic-gate * changed (it could only decrease). Since hat_page_demote() needs an EXCL 46567c478bd9Sstevel@tonic-gate * lock on one of constituent pages it can't be running after all constituent 46577c478bd9Sstevel@tonic-gate * pages are locked. hat_page_demote() with a lock on a constituent page 46587c478bd9Sstevel@tonic-gate * outside of this large page (i.e. pp belonged to a larger large page) is 46597c478bd9Sstevel@tonic-gate * already done with all constituent pages of pp since the root's p_szc is 4660da6c28aaSamw * changed last. Therefore no need to synchronize with hat_page_demote() that 46617c478bd9Sstevel@tonic-gate * locked a constituent page outside of pp's current large page. 46627c478bd9Sstevel@tonic-gate */ 46637c478bd9Sstevel@tonic-gate #ifdef DEBUG 46647c478bd9Sstevel@tonic-gate uint32_t gpg_trylock_mtbf = 0; 46657c478bd9Sstevel@tonic-gate #endif 46667c478bd9Sstevel@tonic-gate 46677c478bd9Sstevel@tonic-gate int 46687c478bd9Sstevel@tonic-gate group_page_trylock(page_t *pp, se_t se) 46697c478bd9Sstevel@tonic-gate { 46707c478bd9Sstevel@tonic-gate page_t *tpp; 46717c478bd9Sstevel@tonic-gate pgcnt_t npgs, i, j; 46727c478bd9Sstevel@tonic-gate uint_t pszc = pp->p_szc; 46737c478bd9Sstevel@tonic-gate 46747c478bd9Sstevel@tonic-gate #ifdef DEBUG 46757c478bd9Sstevel@tonic-gate if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 46767c478bd9Sstevel@tonic-gate return (0); 46777c478bd9Sstevel@tonic-gate } 46787c478bd9Sstevel@tonic-gate #endif 46797c478bd9Sstevel@tonic-gate 46807c478bd9Sstevel@tonic-gate if (pp != PP_GROUPLEADER(pp, pszc)) { 46817c478bd9Sstevel@tonic-gate return (0); 46827c478bd9Sstevel@tonic-gate } 46837c478bd9Sstevel@tonic-gate 46847c478bd9Sstevel@tonic-gate retry: 46857c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED_SE(pp, se)); 46867c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 46877c478bd9Sstevel@tonic-gate if (pszc == 0) { 46887c478bd9Sstevel@tonic-gate return (1); 46897c478bd9Sstevel@tonic-gate } 46907c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(pszc); 46917c478bd9Sstevel@tonic-gate tpp = pp + 1; 46927c478bd9Sstevel@tonic-gate for (i = 1; i < npgs; i++, tpp++) { 46937c478bd9Sstevel@tonic-gate if (!page_trylock(tpp, se)) { 46947c478bd9Sstevel@tonic-gate tpp = pp + 1; 46957c478bd9Sstevel@tonic-gate for (j = 1; j < i; j++, tpp++) { 46967c478bd9Sstevel@tonic-gate page_unlock(tpp); 46977c478bd9Sstevel@tonic-gate } 46987c478bd9Sstevel@tonic-gate return (0); 46997c478bd9Sstevel@tonic-gate } 47007c478bd9Sstevel@tonic-gate } 47017c478bd9Sstevel@tonic-gate if (pp->p_szc != pszc) { 47027c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc < pszc); 4703ad23a2dbSjohansen ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 47047c478bd9Sstevel@tonic-gate !IS_SWAPFSVP(pp->p_vnode)); 47057c478bd9Sstevel@tonic-gate tpp = pp + 1; 47067c478bd9Sstevel@tonic-gate for (i = 1; i < npgs; i++, tpp++) { 47077c478bd9Sstevel@tonic-gate page_unlock(tpp); 
47087c478bd9Sstevel@tonic-gate 		}
47097c478bd9Sstevel@tonic-gate 		pszc = pp->p_szc;
47107c478bd9Sstevel@tonic-gate 		goto retry;
47117c478bd9Sstevel@tonic-gate 	}
47127c478bd9Sstevel@tonic-gate 	return (1);
47137c478bd9Sstevel@tonic-gate }
47147c478bd9Sstevel@tonic-gate 
47157c478bd9Sstevel@tonic-gate void
47167c478bd9Sstevel@tonic-gate group_page_unlock(page_t *pp)
47177c478bd9Sstevel@tonic-gate {
47187c478bd9Sstevel@tonic-gate 	page_t *tpp;
47197c478bd9Sstevel@tonic-gate 	pgcnt_t npgs, i;
47207c478bd9Sstevel@tonic-gate 
47217c478bd9Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp));
47227c478bd9Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
47237c478bd9Sstevel@tonic-gate 	ASSERT(pp == PP_PAGEROOT(pp));
47247c478bd9Sstevel@tonic-gate 	npgs = page_get_pagecnt(pp->p_szc);
47257c478bd9Sstevel@tonic-gate 	for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
47267c478bd9Sstevel@tonic-gate 		page_unlock(tpp);
47277c478bd9Sstevel@tonic-gate 	}
47287c478bd9Sstevel@tonic-gate }
47297c478bd9Sstevel@tonic-gate 
47307c478bd9Sstevel@tonic-gate /*
47317c478bd9Sstevel@tonic-gate  * returns
47327c478bd9Sstevel@tonic-gate  * 0      : on success and *nrelocp is number of relocated PAGESIZE pages
47337c478bd9Sstevel@tonic-gate  * ERANGE : this is not a base page
47347c478bd9Sstevel@tonic-gate  * EBUSY  : failure to get locks on the page/pages
47357c478bd9Sstevel@tonic-gate  * ENOMEM : failure to obtain replacement pages
47367c478bd9Sstevel@tonic-gate  * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
47378b464eb8Smec  * EIO    : An error occurred while trying to copy the page data
47387c478bd9Sstevel@tonic-gate  *
47397c478bd9Sstevel@tonic-gate  * Return with all constituent members of target and replacement
47407c478bd9Sstevel@tonic-gate  * SE_EXCL locked. It is the caller's responsibility to drop the
47417c478bd9Sstevel@tonic-gate  * locks.
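 *
 * An illustrative (assumed) caller pattern; the real consumers are the
 * wrappers below and the platform hat layer:
 *
 *	spgcnt_t nreloc;
 *	page_t *repl = NULL;
 *
 *	if (do_page_relocate(&targ, &repl, 1, &nreloc, NULL) == 0) {
 *		(all nreloc PAGESIZE pages of targ and repl are now
 *		SE_EXCL locked; the caller must drop those locks)
 *	}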
47427c478bd9Sstevel@tonic-gate */ 47437c478bd9Sstevel@tonic-gate int 47447c478bd9Sstevel@tonic-gate do_page_relocate( 47457c478bd9Sstevel@tonic-gate page_t **target, 47467c478bd9Sstevel@tonic-gate page_t **replacement, 47477c478bd9Sstevel@tonic-gate int grouplock, 47487c478bd9Sstevel@tonic-gate spgcnt_t *nrelocp, 47497c478bd9Sstevel@tonic-gate lgrp_t *lgrp) 47507c478bd9Sstevel@tonic-gate { 47517c478bd9Sstevel@tonic-gate page_t *first_repl; 47527c478bd9Sstevel@tonic-gate page_t *repl; 47537c478bd9Sstevel@tonic-gate page_t *targ; 47547c478bd9Sstevel@tonic-gate page_t *pl = NULL; 47557c478bd9Sstevel@tonic-gate uint_t ppattr; 47567c478bd9Sstevel@tonic-gate pfn_t pfn, repl_pfn; 47577c478bd9Sstevel@tonic-gate uint_t szc; 47587c478bd9Sstevel@tonic-gate spgcnt_t npgs, i; 47597c478bd9Sstevel@tonic-gate int repl_contig = 0; 47607c478bd9Sstevel@tonic-gate uint_t flags = 0; 47617c478bd9Sstevel@tonic-gate spgcnt_t dofree = 0; 47627c478bd9Sstevel@tonic-gate 47637c478bd9Sstevel@tonic-gate *nrelocp = 0; 47647c478bd9Sstevel@tonic-gate 47657c478bd9Sstevel@tonic-gate #if defined(__sparc) 47667c478bd9Sstevel@tonic-gate /* 47677c478bd9Sstevel@tonic-gate * We need to wait till OBP has completed 47687c478bd9Sstevel@tonic-gate * its boot-time handoff of its resources to the kernel 47697c478bd9Sstevel@tonic-gate * before we allow page relocation 47707c478bd9Sstevel@tonic-gate */ 47717c478bd9Sstevel@tonic-gate if (page_relocate_ready == 0) { 47727c478bd9Sstevel@tonic-gate return (EAGAIN); 47737c478bd9Sstevel@tonic-gate } 47747c478bd9Sstevel@tonic-gate #endif 47757c478bd9Sstevel@tonic-gate 47767c478bd9Sstevel@tonic-gate /* 47777c478bd9Sstevel@tonic-gate * If this is not a base page, 47787c478bd9Sstevel@tonic-gate * just return with 0x0 pages relocated. 47797c478bd9Sstevel@tonic-gate */ 47807c478bd9Sstevel@tonic-gate targ = *target; 47817c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(targ)); 47827c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(targ)); 47837c478bd9Sstevel@tonic-gate szc = targ->p_szc; 47847c478bd9Sstevel@tonic-gate ASSERT(szc < mmu_page_sizes); 47857c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 47867c478bd9Sstevel@tonic-gate pfn = targ->p_pagenum; 47877c478bd9Sstevel@tonic-gate if (pfn != PFN_BASE(pfn, szc)) { 47887c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 47897c478bd9Sstevel@tonic-gate return (ERANGE); 47907c478bd9Sstevel@tonic-gate } 47917c478bd9Sstevel@tonic-gate 47927c478bd9Sstevel@tonic-gate if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 47937c478bd9Sstevel@tonic-gate repl_pfn = repl->p_pagenum; 47947c478bd9Sstevel@tonic-gate if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 47957c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 47967c478bd9Sstevel@tonic-gate return (ERANGE); 47977c478bd9Sstevel@tonic-gate } 47987c478bd9Sstevel@tonic-gate repl_contig = 1; 47997c478bd9Sstevel@tonic-gate } 48007c478bd9Sstevel@tonic-gate 48017c478bd9Sstevel@tonic-gate /* 48027c478bd9Sstevel@tonic-gate * We must lock all members of this large page or we cannot 48037c478bd9Sstevel@tonic-gate * relocate any part of it. 
48047c478bd9Sstevel@tonic-gate */ 48057c478bd9Sstevel@tonic-gate if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 48067c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 48077c478bd9Sstevel@tonic-gate return (EBUSY); 48087c478bd9Sstevel@tonic-gate } 48097c478bd9Sstevel@tonic-gate 48107c478bd9Sstevel@tonic-gate /* 48117c478bd9Sstevel@tonic-gate * reread szc it could have been decreased before 48127c478bd9Sstevel@tonic-gate * group_page_trylock() was done. 48137c478bd9Sstevel@tonic-gate */ 48147c478bd9Sstevel@tonic-gate szc = targ->p_szc; 48157c478bd9Sstevel@tonic-gate ASSERT(szc < mmu_page_sizes); 48167c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 48177c478bd9Sstevel@tonic-gate ASSERT(pfn == PFN_BASE(pfn, szc)); 48187c478bd9Sstevel@tonic-gate 48197c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(targ->p_szc); 48207c478bd9Sstevel@tonic-gate 48217c478bd9Sstevel@tonic-gate if (repl == NULL) { 48227c478bd9Sstevel@tonic-gate dofree = npgs; /* Size of target page in MMU pages */ 48237c478bd9Sstevel@tonic-gate if (!page_create_wait(dofree, 0)) { 48247c478bd9Sstevel@tonic-gate if (grouplock != 0) { 48257c478bd9Sstevel@tonic-gate group_page_unlock(targ); 48267c478bd9Sstevel@tonic-gate } 48277c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 48287c478bd9Sstevel@tonic-gate return (ENOMEM); 48297c478bd9Sstevel@tonic-gate } 48307c478bd9Sstevel@tonic-gate 48317c478bd9Sstevel@tonic-gate /* 48327c478bd9Sstevel@tonic-gate * seg kmem pages require that the target and replacement 48337c478bd9Sstevel@tonic-gate * page be the same pagesize. 48347c478bd9Sstevel@tonic-gate */ 4835ad23a2dbSjohansen flags = (VN_ISKAS(targ->p_vnode)) ? PGR_SAMESZC : 0; 48367c478bd9Sstevel@tonic-gate repl = page_get_replacement_page(targ, lgrp, flags); 48377c478bd9Sstevel@tonic-gate if (repl == NULL) { 48387c478bd9Sstevel@tonic-gate if (grouplock != 0) { 48397c478bd9Sstevel@tonic-gate group_page_unlock(targ); 48407c478bd9Sstevel@tonic-gate } 48417c478bd9Sstevel@tonic-gate page_create_putback(dofree); 48427c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 48437c478bd9Sstevel@tonic-gate return (ENOMEM); 48447c478bd9Sstevel@tonic-gate } 48457c478bd9Sstevel@tonic-gate } 48467c478bd9Sstevel@tonic-gate #ifdef DEBUG 48477c478bd9Sstevel@tonic-gate else { 48487c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(repl)); 48497c478bd9Sstevel@tonic-gate } 48507c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 48517c478bd9Sstevel@tonic-gate 48527c478bd9Sstevel@tonic-gate #if defined(__sparc) 48537c478bd9Sstevel@tonic-gate /* 48547c478bd9Sstevel@tonic-gate * Let hat_page_relocate() complete the relocation if it's kernel page 48557c478bd9Sstevel@tonic-gate */ 4856ad23a2dbSjohansen if (VN_ISKAS(targ->p_vnode)) { 48577c478bd9Sstevel@tonic-gate *replacement = repl; 48587c478bd9Sstevel@tonic-gate if (hat_page_relocate(target, replacement, nrelocp) != 0) { 48597c478bd9Sstevel@tonic-gate if (grouplock != 0) { 48607c478bd9Sstevel@tonic-gate group_page_unlock(targ); 48617c478bd9Sstevel@tonic-gate } 48627c478bd9Sstevel@tonic-gate if (dofree) { 48637c478bd9Sstevel@tonic-gate *replacement = NULL; 48647c478bd9Sstevel@tonic-gate page_free_replacement_page(repl); 48657c478bd9Sstevel@tonic-gate page_create_putback(dofree); 48667c478bd9Sstevel@tonic-gate } 48677c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 48687c478bd9Sstevel@tonic-gate return (EAGAIN); 48697c478bd9Sstevel@tonic-gate } 48707c478bd9Sstevel@tonic-gate 
VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 48717c478bd9Sstevel@tonic-gate return (0); 48727c478bd9Sstevel@tonic-gate } 48737c478bd9Sstevel@tonic-gate #else 48747c478bd9Sstevel@tonic-gate #if defined(lint) 48757c478bd9Sstevel@tonic-gate dofree = dofree; 48767c478bd9Sstevel@tonic-gate #endif 48777c478bd9Sstevel@tonic-gate #endif 48787c478bd9Sstevel@tonic-gate 48797c478bd9Sstevel@tonic-gate first_repl = repl; 48807c478bd9Sstevel@tonic-gate 48817c478bd9Sstevel@tonic-gate for (i = 0; i < npgs; i++) { 48827c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(targ)); 488307b65a64Saguzovsk ASSERT(targ->p_slckcnt == 0); 488407b65a64Saguzovsk ASSERT(repl->p_slckcnt == 0); 48857c478bd9Sstevel@tonic-gate 48867c478bd9Sstevel@tonic-gate (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 48877c478bd9Sstevel@tonic-gate 48887c478bd9Sstevel@tonic-gate ASSERT(hat_page_getshare(targ) == 0); 48897c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(targ)); 48907c478bd9Sstevel@tonic-gate ASSERT(targ->p_pagenum == (pfn + i)); 48917c478bd9Sstevel@tonic-gate ASSERT(repl_contig == 0 || 48927c478bd9Sstevel@tonic-gate repl->p_pagenum == (repl_pfn + i)); 48937c478bd9Sstevel@tonic-gate 48947c478bd9Sstevel@tonic-gate /* 48957c478bd9Sstevel@tonic-gate * Copy the page contents and attributes then 48967c478bd9Sstevel@tonic-gate * relocate the page in the page hash. 48977c478bd9Sstevel@tonic-gate */ 48988b464eb8Smec if (ppcopy(targ, repl) == 0) { 48998b464eb8Smec targ = *target; 49008b464eb8Smec repl = first_repl; 49018b464eb8Smec VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 49028b464eb8Smec if (grouplock != 0) { 49038b464eb8Smec group_page_unlock(targ); 49048b464eb8Smec } 49058b464eb8Smec if (dofree) { 49068b464eb8Smec *replacement = NULL; 49078b464eb8Smec page_free_replacement_page(repl); 49088b464eb8Smec page_create_putback(dofree); 49098b464eb8Smec } 49108b464eb8Smec return (EIO); 49118b464eb8Smec } 49128b464eb8Smec 49138b464eb8Smec targ++; 49148b464eb8Smec if (repl_contig != 0) { 49158b464eb8Smec repl++; 49168b464eb8Smec } else { 49178b464eb8Smec repl = repl->p_next; 49188b464eb8Smec } 49198b464eb8Smec } 49208b464eb8Smec 49218b464eb8Smec repl = first_repl; 49228b464eb8Smec targ = *target; 49238b464eb8Smec 49248b464eb8Smec for (i = 0; i < npgs; i++) { 49257c478bd9Sstevel@tonic-gate ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 49269d0d62adSJason Beloro page_clr_all_props(repl); 49277c478bd9Sstevel@tonic-gate page_set_props(repl, ppattr); 49287c478bd9Sstevel@tonic-gate page_relocate_hash(repl, targ); 49297c478bd9Sstevel@tonic-gate 49307c478bd9Sstevel@tonic-gate ASSERT(hat_page_getshare(targ) == 0); 49317c478bd9Sstevel@tonic-gate ASSERT(hat_page_getshare(repl) == 0); 49327c478bd9Sstevel@tonic-gate /* 49337c478bd9Sstevel@tonic-gate * Now clear the props on targ, after the 49347c478bd9Sstevel@tonic-gate * page_relocate_hash(), they no longer 49357c478bd9Sstevel@tonic-gate * have any meaning. 
49367c478bd9Sstevel@tonic-gate */ 49379d0d62adSJason Beloro page_clr_all_props(targ); 49387c478bd9Sstevel@tonic-gate ASSERT(targ->p_next == targ); 49397c478bd9Sstevel@tonic-gate ASSERT(targ->p_prev == targ); 49407c478bd9Sstevel@tonic-gate page_list_concat(&pl, &targ); 49417c478bd9Sstevel@tonic-gate 49427c478bd9Sstevel@tonic-gate targ++; 49437c478bd9Sstevel@tonic-gate if (repl_contig != 0) { 49447c478bd9Sstevel@tonic-gate repl++; 49457c478bd9Sstevel@tonic-gate } else { 49467c478bd9Sstevel@tonic-gate repl = repl->p_next; 49477c478bd9Sstevel@tonic-gate } 49487c478bd9Sstevel@tonic-gate } 49497c478bd9Sstevel@tonic-gate /* assert that we have come full circle with repl */ 49507c478bd9Sstevel@tonic-gate ASSERT(repl_contig == 1 || first_repl == repl); 49517c478bd9Sstevel@tonic-gate 49527c478bd9Sstevel@tonic-gate *target = pl; 49537c478bd9Sstevel@tonic-gate if (*replacement == NULL) { 49547c478bd9Sstevel@tonic-gate ASSERT(first_repl == repl); 49557c478bd9Sstevel@tonic-gate *replacement = repl; 49567c478bd9Sstevel@tonic-gate } 49577c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 49587c478bd9Sstevel@tonic-gate *nrelocp = npgs; 49597c478bd9Sstevel@tonic-gate return (0); 49607c478bd9Sstevel@tonic-gate } 49617c478bd9Sstevel@tonic-gate /* 49627c478bd9Sstevel@tonic-gate * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 49637c478bd9Sstevel@tonic-gate */ 49647c478bd9Sstevel@tonic-gate int 49657c478bd9Sstevel@tonic-gate page_relocate( 49667c478bd9Sstevel@tonic-gate page_t **target, 49677c478bd9Sstevel@tonic-gate page_t **replacement, 49687c478bd9Sstevel@tonic-gate int grouplock, 49697c478bd9Sstevel@tonic-gate int freetarget, 49707c478bd9Sstevel@tonic-gate spgcnt_t *nrelocp, 49717c478bd9Sstevel@tonic-gate lgrp_t *lgrp) 49727c478bd9Sstevel@tonic-gate { 49737c478bd9Sstevel@tonic-gate spgcnt_t ret; 49747c478bd9Sstevel@tonic-gate 49757c478bd9Sstevel@tonic-gate /* do_page_relocate returns 0 on success or errno value */ 49767c478bd9Sstevel@tonic-gate ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 49777c478bd9Sstevel@tonic-gate 49787c478bd9Sstevel@tonic-gate if (ret != 0 || freetarget == 0) { 49797c478bd9Sstevel@tonic-gate return (ret); 49807c478bd9Sstevel@tonic-gate } 49817c478bd9Sstevel@tonic-gate if (*nrelocp == 1) { 49827c478bd9Sstevel@tonic-gate ASSERT(*target != NULL); 49837c478bd9Sstevel@tonic-gate page_free(*target, 1); 49847c478bd9Sstevel@tonic-gate } else { 49857c478bd9Sstevel@tonic-gate page_t *tpp = *target; 49867c478bd9Sstevel@tonic-gate uint_t szc = tpp->p_szc; 49877c478bd9Sstevel@tonic-gate pgcnt_t npgs = page_get_pagecnt(szc); 49887c478bd9Sstevel@tonic-gate ASSERT(npgs > 1); 49897c478bd9Sstevel@tonic-gate ASSERT(szc != 0); 49907c478bd9Sstevel@tonic-gate do { 49917c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp)); 49927c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp)); 49937c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc); 49947c478bd9Sstevel@tonic-gate PP_SETFREE(tpp); 49957c478bd9Sstevel@tonic-gate PP_SETAGED(tpp); 49967c478bd9Sstevel@tonic-gate npgs--; 49977c478bd9Sstevel@tonic-gate } while ((tpp = tpp->p_next) != *target); 49987c478bd9Sstevel@tonic-gate ASSERT(npgs == 0); 49997c478bd9Sstevel@tonic-gate page_list_add_pages(*target, 0); 50007c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(szc); 50017c478bd9Sstevel@tonic-gate page_create_putback(npgs); 50027c478bd9Sstevel@tonic-gate } 50037c478bd9Sstevel@tonic-gate return (ret); 50047c478bd9Sstevel@tonic-gate } 50057c478bd9Sstevel@tonic-gate 
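/*
 * Illustrative sketch only, not part of the original source: a minimal
 * (hypothetical) helper showing how page_relocate() is typically driven
 * for a single PAGESIZE page, in the style of page_relocate_cage()
 * below.  The function name and the EXAMPLE_ONLY guard are assumptions
 * made for this example.
 */
#ifdef	EXAMPLE_ONLY
static int
example_relocate_base_page(page_t **targ, lgrp_t *to)
{
	page_t *repl = NULL;	/* let do_page_relocate() allocate it */
	spgcnt_t nreloc;
	int err;

	ASSERT(PAGE_EXCL(*targ));
	ASSERT((*targ)->p_szc == 0);

	/*
	 * grouplock is 0 since a szc 0 page has no other constituents;
	 * freetarget is 1 so the old page is freed for us on success.
	 */
	err = page_relocate(targ, &repl, 0, 1, &nreloc, to);
	if (err == 0) {
		ASSERT(nreloc == 1);
		page_unlock(repl);	/* drop the SE_EXCL lock we hold */
	}
	return (err);
}
#endif	/* EXAMPLE_ONLY */
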
50067c478bd9Sstevel@tonic-gate /*
50077c478bd9Sstevel@tonic-gate  * It is up to the caller to deal with pcf accounting.
50087c478bd9Sstevel@tonic-gate  */
50097c478bd9Sstevel@tonic-gate void
50107c478bd9Sstevel@tonic-gate page_free_replacement_page(page_t *pplist)
50117c478bd9Sstevel@tonic-gate {
50127c478bd9Sstevel@tonic-gate 	page_t *pp;
50137c478bd9Sstevel@tonic-gate 
50147c478bd9Sstevel@tonic-gate 	while (pplist != NULL) {
50157c478bd9Sstevel@tonic-gate 		/*
50167c478bd9Sstevel@tonic-gate 		 * pplist is a linked list of replacement pages.
50177c478bd9Sstevel@tonic-gate 		 */
50187c478bd9Sstevel@tonic-gate 		pp = pplist;
50197c478bd9Sstevel@tonic-gate 		if (pp->p_szc == 0) {
50207c478bd9Sstevel@tonic-gate 			page_sub(&pplist, pp);
50219d0d62adSJason Beloro 			page_clr_all_props(pp);
50227c478bd9Sstevel@tonic-gate 			PP_SETFREE(pp);
50237c478bd9Sstevel@tonic-gate 			PP_SETAGED(pp);
50247c478bd9Sstevel@tonic-gate 			page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
50257c478bd9Sstevel@tonic-gate 			page_unlock(pp);
50267c478bd9Sstevel@tonic-gate 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
50277c478bd9Sstevel@tonic-gate 		} else {
50287c478bd9Sstevel@tonic-gate 			spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
50297c478bd9Sstevel@tonic-gate 			page_t *tpp;
50307c478bd9Sstevel@tonic-gate 			page_list_break(&pp, &pplist, curnpgs);
50317c478bd9Sstevel@tonic-gate 			tpp = pp;
50327c478bd9Sstevel@tonic-gate 			do {
50337c478bd9Sstevel@tonic-gate 				ASSERT(PAGE_EXCL(tpp));
50347c478bd9Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(tpp));
50359d0d62adSJason Beloro 				page_clr_all_props(tpp);
50367c478bd9Sstevel@tonic-gate 				PP_SETFREE(tpp);
50377c478bd9Sstevel@tonic-gate 				PP_SETAGED(tpp);
50387c478bd9Sstevel@tonic-gate 			} while ((tpp = tpp->p_next) != pp);
50397c478bd9Sstevel@tonic-gate 			page_list_add_pages(pp, 0);
50407c478bd9Sstevel@tonic-gate 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
50417c478bd9Sstevel@tonic-gate 		}
50427c478bd9Sstevel@tonic-gate 	}
50437c478bd9Sstevel@tonic-gate }
50447c478bd9Sstevel@tonic-gate 
50457c478bd9Sstevel@tonic-gate /*
50467c478bd9Sstevel@tonic-gate  * Relocate target to a non-relocatable replacement page.
50477c478bd9Sstevel@tonic-gate */ 50487c478bd9Sstevel@tonic-gate int 50497c478bd9Sstevel@tonic-gate page_relocate_cage(page_t **target, page_t **replacement) 50507c478bd9Sstevel@tonic-gate { 50517c478bd9Sstevel@tonic-gate page_t *tpp, *rpp; 50527c478bd9Sstevel@tonic-gate spgcnt_t pgcnt, npgs; 50537c478bd9Sstevel@tonic-gate int result; 50547c478bd9Sstevel@tonic-gate 50557c478bd9Sstevel@tonic-gate tpp = *target; 50567c478bd9Sstevel@tonic-gate 50577c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp)); 50587c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == 0); 50597c478bd9Sstevel@tonic-gate 50607c478bd9Sstevel@tonic-gate pgcnt = btop(page_get_pagesize(tpp->p_szc)); 50617c478bd9Sstevel@tonic-gate 50627c478bd9Sstevel@tonic-gate do { 50637c478bd9Sstevel@tonic-gate (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 50647c478bd9Sstevel@tonic-gate rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 50657c478bd9Sstevel@tonic-gate if (rpp == NULL) { 50667c478bd9Sstevel@tonic-gate page_create_putback(pgcnt); 50677c478bd9Sstevel@tonic-gate kcage_cageout_wakeup(); 50687c478bd9Sstevel@tonic-gate } 50697c478bd9Sstevel@tonic-gate } while (rpp == NULL); 50707c478bd9Sstevel@tonic-gate 50717c478bd9Sstevel@tonic-gate ASSERT(PP_ISNORELOC(rpp)); 50727c478bd9Sstevel@tonic-gate 50737c478bd9Sstevel@tonic-gate result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 50747c478bd9Sstevel@tonic-gate 50757c478bd9Sstevel@tonic-gate if (result == 0) { 50767c478bd9Sstevel@tonic-gate *replacement = rpp; 50777c478bd9Sstevel@tonic-gate if (pgcnt != npgs) 50787c478bd9Sstevel@tonic-gate panic("page_relocate_cage: partial relocation"); 50797c478bd9Sstevel@tonic-gate } 50807c478bd9Sstevel@tonic-gate 50817c478bd9Sstevel@tonic-gate return (result); 50827c478bd9Sstevel@tonic-gate } 50837c478bd9Sstevel@tonic-gate 50847c478bd9Sstevel@tonic-gate /* 50857c478bd9Sstevel@tonic-gate * Release the page lock on a page, place on cachelist 50867c478bd9Sstevel@tonic-gate * tail if no longer mapped. Caller can let us know if 50877c478bd9Sstevel@tonic-gate * the page is known to be clean. 
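 *
 * An illustrative (assumed) caller pattern, keyed to the PGREL_*
 * values returned below:
 *
 *	status = page_release(pp, 1);
 *	if (status == PGREL_CLEAN)
 *		(the page was freed onto the cachelist)
 *	else if (status == PGREL_MOD)
 *		(the page was dirty and was only unlocked)
 *	else
 *		(PGREL_NOTREL: not releasable; the page was unlocked)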
50887c478bd9Sstevel@tonic-gate */ 50897c478bd9Sstevel@tonic-gate int 50907c478bd9Sstevel@tonic-gate page_release(page_t *pp, int checkmod) 50917c478bd9Sstevel@tonic-gate { 50927c478bd9Sstevel@tonic-gate int status; 50937c478bd9Sstevel@tonic-gate 50947c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 50957c478bd9Sstevel@tonic-gate (pp->p_vnode != NULL)); 50967c478bd9Sstevel@tonic-gate 50977c478bd9Sstevel@tonic-gate if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 50987c478bd9Sstevel@tonic-gate ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 50997c478bd9Sstevel@tonic-gate pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 51007c478bd9Sstevel@tonic-gate !hat_page_is_mapped(pp)) { 51017c478bd9Sstevel@tonic-gate 51027c478bd9Sstevel@tonic-gate /* 51037c478bd9Sstevel@tonic-gate * If page is modified, unlock it 51047c478bd9Sstevel@tonic-gate * 51057c478bd9Sstevel@tonic-gate * (p_nrm & P_MOD) bit has the latest stuff because: 51067c478bd9Sstevel@tonic-gate * (1) We found that this page doesn't have any mappings 51077c478bd9Sstevel@tonic-gate * _after_ holding SE_EXCL and 51087c478bd9Sstevel@tonic-gate * (2) We didn't drop SE_EXCL lock after the check in (1) 51097c478bd9Sstevel@tonic-gate */ 51107c478bd9Sstevel@tonic-gate if (checkmod && hat_ismod(pp)) { 51117c478bd9Sstevel@tonic-gate page_unlock(pp); 51127c478bd9Sstevel@tonic-gate status = PGREL_MOD; 51137c478bd9Sstevel@tonic-gate } else { 51147c478bd9Sstevel@tonic-gate /*LINTED: constant in conditional context*/ 51157c478bd9Sstevel@tonic-gate VN_DISPOSE(pp, B_FREE, 0, kcred); 51167c478bd9Sstevel@tonic-gate status = PGREL_CLEAN; 51177c478bd9Sstevel@tonic-gate } 51187c478bd9Sstevel@tonic-gate } else { 51197c478bd9Sstevel@tonic-gate page_unlock(pp); 51207c478bd9Sstevel@tonic-gate status = PGREL_NOTREL; 51217c478bd9Sstevel@tonic-gate } 51227c478bd9Sstevel@tonic-gate return (status); 51237c478bd9Sstevel@tonic-gate } 51247c478bd9Sstevel@tonic-gate 5125db874c57Selowe /* 5126db874c57Selowe * Given a constituent page, try to demote the large page on the freelist. 5127db874c57Selowe * 5128db874c57Selowe * Returns nonzero if the page could be demoted successfully. Returns with 5129db874c57Selowe * the constituent page still locked. 5130db874c57Selowe */ 5131db874c57Selowe int 5132db874c57Selowe page_try_demote_free_pages(page_t *pp) 5133db874c57Selowe { 5134db874c57Selowe page_t *rootpp = pp; 5135db874c57Selowe pfn_t pfn = page_pptonum(pp); 5136db874c57Selowe spgcnt_t npgs; 5137db874c57Selowe uint_t szc = pp->p_szc; 5138db874c57Selowe 5139db874c57Selowe ASSERT(PP_ISFREE(pp)); 5140db874c57Selowe ASSERT(PAGE_EXCL(pp)); 5141db874c57Selowe 5142db874c57Selowe /* 5143db874c57Selowe * Adjust rootpp and lock it, if `pp' is not the base 5144db874c57Selowe * constituent page. 
5145db874c57Selowe */ 5146db874c57Selowe npgs = page_get_pagecnt(pp->p_szc); 5147db874c57Selowe if (npgs == 1) { 5148db874c57Selowe return (0); 5149db874c57Selowe } 5150db874c57Selowe 5151db874c57Selowe if (!IS_P2ALIGNED(pfn, npgs)) { 5152db874c57Selowe pfn = P2ALIGN(pfn, npgs); 5153db874c57Selowe rootpp = page_numtopp_nolock(pfn); 5154db874c57Selowe } 5155db874c57Selowe 5156db874c57Selowe if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5157db874c57Selowe return (0); 5158db874c57Selowe } 5159db874c57Selowe 5160db874c57Selowe if (rootpp->p_szc != szc) { 5161db874c57Selowe if (pp != rootpp) 5162db874c57Selowe page_unlock(rootpp); 5163db874c57Selowe return (0); 5164db874c57Selowe } 5165db874c57Selowe 5166db874c57Selowe page_demote_free_pages(rootpp); 5167db874c57Selowe 5168db874c57Selowe if (pp != rootpp) 5169db874c57Selowe page_unlock(rootpp); 5170db874c57Selowe 5171db874c57Selowe ASSERT(PP_ISFREE(pp)); 5172db874c57Selowe ASSERT(PAGE_EXCL(pp)); 5173db874c57Selowe return (1); 5174db874c57Selowe } 5175db874c57Selowe 5176db874c57Selowe /* 5177db874c57Selowe * Given a constituent page, try to demote the large page. 5178db874c57Selowe * 5179db874c57Selowe * Returns nonzero if the page could be demoted successfully. Returns with 5180db874c57Selowe * the constituent page still locked. 5181db874c57Selowe */ 51827c478bd9Sstevel@tonic-gate int 51837c478bd9Sstevel@tonic-gate page_try_demote_pages(page_t *pp) 51847c478bd9Sstevel@tonic-gate { 51857c478bd9Sstevel@tonic-gate page_t *tpp, *rootpp = pp; 51867c478bd9Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp); 51877c478bd9Sstevel@tonic-gate spgcnt_t i, npgs; 5188f045d8d6SAmritpal Sandhu uint_t szc = pp->p_szc; 5189d94ffb28Sjmcp vnode_t *vp = pp->p_vnode; 51907c478bd9Sstevel@tonic-gate 5191db874c57Selowe ASSERT(PAGE_EXCL(pp)); 51927c478bd9Sstevel@tonic-gate 51937c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 51947c478bd9Sstevel@tonic-gate 5195db874c57Selowe if (pp->p_szc == 0) { 51967c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 51977c478bd9Sstevel@tonic-gate return (1); 51987c478bd9Sstevel@tonic-gate } 51997c478bd9Sstevel@tonic-gate 5200ad23a2dbSjohansen if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 52017c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5202db874c57Selowe page_demote_vp_pages(pp); 52037c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 52047c478bd9Sstevel@tonic-gate return (1); 52057c478bd9Sstevel@tonic-gate } 52067c478bd9Sstevel@tonic-gate 52077c478bd9Sstevel@tonic-gate /* 52087c478bd9Sstevel@tonic-gate * Adjust rootpp if passed in is not the base 52097c478bd9Sstevel@tonic-gate * constituent page. 52107c478bd9Sstevel@tonic-gate */ 5211db874c57Selowe npgs = page_get_pagecnt(pp->p_szc); 52127c478bd9Sstevel@tonic-gate ASSERT(npgs > 1); 52137c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, npgs)) { 52147c478bd9Sstevel@tonic-gate pfn = P2ALIGN(pfn, npgs); 52157c478bd9Sstevel@tonic-gate rootpp = page_numtopp_nolock(pfn); 52167c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 52177c478bd9Sstevel@tonic-gate ASSERT(rootpp->p_vnode != NULL); 52187c478bd9Sstevel@tonic-gate ASSERT(rootpp->p_szc == szc); 52197c478bd9Sstevel@tonic-gate } 52207c478bd9Sstevel@tonic-gate 52217c478bd9Sstevel@tonic-gate /* 52227c478bd9Sstevel@tonic-gate * We can't demote kernel pages since we can't hat_unload() 52237c478bd9Sstevel@tonic-gate * the mappings. 
52247c478bd9Sstevel@tonic-gate */ 5225ad23a2dbSjohansen if (VN_ISKAS(rootpp->p_vnode)) 52267c478bd9Sstevel@tonic-gate return (0); 52277c478bd9Sstevel@tonic-gate 52287c478bd9Sstevel@tonic-gate /* 52297c478bd9Sstevel@tonic-gate * Attempt to lock all constituent pages except the page passed 52307c478bd9Sstevel@tonic-gate * in since it's already locked. 52317c478bd9Sstevel@tonic-gate */ 5232affbd3ccSkchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 52337c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(tpp)); 52347c478bd9Sstevel@tonic-gate ASSERT(tpp->p_vnode != NULL); 52357c478bd9Sstevel@tonic-gate 52367c478bd9Sstevel@tonic-gate if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 52377c478bd9Sstevel@tonic-gate break; 52387c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == rootpp->p_szc); 52397c478bd9Sstevel@tonic-gate ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 52407c478bd9Sstevel@tonic-gate } 52417c478bd9Sstevel@tonic-gate 52427c478bd9Sstevel@tonic-gate /* 5243db874c57Selowe * If we failed to lock them all then unlock what we have 5244db874c57Selowe * locked so far and bail. 52457c478bd9Sstevel@tonic-gate */ 52467c478bd9Sstevel@tonic-gate if (i < npgs) { 52477c478bd9Sstevel@tonic-gate tpp = rootpp; 52487c478bd9Sstevel@tonic-gate while (i-- > 0) { 52497c478bd9Sstevel@tonic-gate if (tpp != pp) 52507c478bd9Sstevel@tonic-gate page_unlock(tpp); 5251affbd3ccSkchow tpp++; 52527c478bd9Sstevel@tonic-gate } 52537c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 52547c478bd9Sstevel@tonic-gate return (0); 52557c478bd9Sstevel@tonic-gate } 52567c478bd9Sstevel@tonic-gate 5257affbd3ccSkchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 52587c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp)); 525907b65a64Saguzovsk ASSERT(tpp->p_slckcnt == 0); 5260db874c57Selowe (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 52617c478bd9Sstevel@tonic-gate tpp->p_szc = 0; 52627c478bd9Sstevel@tonic-gate } 52637c478bd9Sstevel@tonic-gate 52647c478bd9Sstevel@tonic-gate /* 52657c478bd9Sstevel@tonic-gate * Unlock all pages except the page passed in. 52667c478bd9Sstevel@tonic-gate */ 5267affbd3ccSkchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 52687c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp)); 52697c478bd9Sstevel@tonic-gate if (tpp != pp) 52707c478bd9Sstevel@tonic-gate page_unlock(tpp); 52717c478bd9Sstevel@tonic-gate } 5272db874c57Selowe 52737c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 52747c478bd9Sstevel@tonic-gate return (1); 52757c478bd9Sstevel@tonic-gate } 52767c478bd9Sstevel@tonic-gate 52777c478bd9Sstevel@tonic-gate /* 52787c478bd9Sstevel@tonic-gate * Called by page_free() and page_destroy() to demote the page size code 52797c478bd9Sstevel@tonic-gate * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 52807c478bd9Sstevel@tonic-gate * p_szc on free list, neither can we just clear p_szc of a single page_t 52817c478bd9Sstevel@tonic-gate * within a large page since it will break other code that relies on p_szc 52827c478bd9Sstevel@tonic-gate * being the same for all page_t's of a large page). Anonymous pages should 52837c478bd9Sstevel@tonic-gate * never end up here because anon_map_getpages() cannot deal with p_szc 52847c478bd9Sstevel@tonic-gate * changes after a single constituent page is locked. 
While anonymous or
52857c478bd9Sstevel@tonic-gate  * kernel large pages are demoted or freed an entire large page at a time,
52867c478bd9Sstevel@tonic-gate  * with all constituent pages locked EXCL, for file system pages we have to
52877c478bd9Sstevel@tonic-gate  * be able to demote a large page (i.e. decrease the p_szc of all constituent
52887c478bd9Sstevel@tonic-gate  * pages) with just an EXCL lock on one of the constituent pages. The reason
52897c478bd9Sstevel@tonic-gate  * we can easily deal with anonymous page demotion an entire large page at a
52907c478bd9Sstevel@tonic-gate  * time is that those operations originate at the address space level and
52917c478bd9Sstevel@tonic-gate  * concern the entire large page region, with actual demotion done only when
52927c478bd9Sstevel@tonic-gate  * pages are not shared with any other process (therefore we can always get
52937c478bd9Sstevel@tonic-gate  * an EXCL lock on all anonymous constituent pages after clearing the
52947c478bd9Sstevel@tonic-gate  * segment page cache). However, file system pages can be truncated or
52957c478bd9Sstevel@tonic-gate  * invalidated at a PAGESIZE level from the file system side and end up in
52967c478bd9Sstevel@tonic-gate  * page_free() or page_destroy() (we also allow only part of the large page
5297da6c28aaSamw  * to be SOFTLOCKed, and therefore pageout should be able to demote a large
52987c478bd9Sstevel@tonic-gate  * page by EXCL locking any constituent page that is not under SOFTLOCK). In
52997c478bd9Sstevel@tonic-gate  * those cases we cannot rely on being able to lock EXCL all constituent pages.
53007c478bd9Sstevel@tonic-gate  *
53017c478bd9Sstevel@tonic-gate  * To prevent szc changes on file system pages one has to lock all constituent
53027c478bd9Sstevel@tonic-gate  * pages at least SHARED (or call page_szc_lock()). The only subsystem that
53037c478bd9Sstevel@tonic-gate  * doesn't rely on locking all constituent pages (or on page_szc_lock()) to
53047c478bd9Sstevel@tonic-gate  * prevent szc changes is the hat layer, which uses its own page level mlist
53057c478bd9Sstevel@tonic-gate  * locks. The hat assumes that szc doesn't change after the mlist lock for a
53067c478bd9Sstevel@tonic-gate  * page is taken. Therefore we need to change szc under hat level locks if we
53077c478bd9Sstevel@tonic-gate  * only have an EXCL lock on a single constituent page and the hat still
53087c478bd9Sstevel@tonic-gate  * references any of the constituent pages. (Note we can't "ignore" the hat
53097c478bd9Sstevel@tonic-gate  * layer by simply hat_pageunload()ing all constituent pages without EXCL
53107c478bd9Sstevel@tonic-gate  * locks on all of them.) We use the hat_page_demote() call to safely demote
53117c478bd9Sstevel@tonic-gate  * the szc of all constituent pages under hat locks when we only have an EXCL
53127c478bd9Sstevel@tonic-gate  * lock on one of the constituent pages.
53137c478bd9Sstevel@tonic-gate  *
53147c478bd9Sstevel@tonic-gate  * This routine calls page_szc_lock() before calling hat_page_demote() to
53157c478bd9Sstevel@tonic-gate  * allow segvn, in one special case, not to lock all constituent pages SHARED
5316da6c28aaSamw  * before calling hat_memload_array(), which relies on p_szc not changing
53177c478bd9Sstevel@tonic-gate  * even before the hat level mlist lock is taken. In that case segvn uses
5318da6c28aaSamw  * page_szc_lock() to prevent hat_page_demote() from changing p_szc values.
53197c478bd9Sstevel@tonic-gate  *
53207c478bd9Sstevel@tonic-gate  * Anonymous or kernel page demotion still has to lock all pages exclusively
53217c478bd9Sstevel@tonic-gate  * and do hat_pageunload() on all constituent pages before demoting the
53227c478bd9Sstevel@tonic-gate  * page; therefore there's no need for anonymous or kernel page demotion to
53237c478bd9Sstevel@tonic-gate  * use the hat_page_demote() mechanism.
53247c478bd9Sstevel@tonic-gate  *
53257c478bd9Sstevel@tonic-gate  * hat_page_demote() removes all large mappings that map pp and then decreases
53267c478bd9Sstevel@tonic-gate  * p_szc starting from the last constituent page of the large page. Working
53277c478bd9Sstevel@tonic-gate  * from the tail of a large page in decreasing pfn order allows anyone looking
53287c478bd9Sstevel@tonic-gate  * at the root page to know that hat_page_demote() is done for the root's szc
53297c478bd9Sstevel@tonic-gate  * area: e.g. if a root page has szc 1, one only has to lock all constituent
53307c478bd9Sstevel@tonic-gate  * pages within the szc 1 area to prevent szc changes, because a
53317c478bd9Sstevel@tonic-gate  * hat_page_demote() that started on this page when it had szc > 1 is already
53327c478bd9Sstevel@tonic-gate  * done for this szc 1 area.
53337c478bd9Sstevel@tonic-gate  *
5334da6c28aaSamw  * We are guaranteed that all constituent pages of pp's large page belong to
53357c478bd9Sstevel@tonic-gate  * the same vnode, with consecutive offsets increasing in the direction of
53367c478bd9Sstevel@tonic-gate  * the pfn, i.e. the identity of constituent pages can't change until their
53377c478bd9Sstevel@tonic-gate  * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
53387c478bd9Sstevel@tonic-gate  * large mappings to pp even though we don't lock any constituent page except
 * pp (i.e. we won't unload e.g. a kernel locked page).
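 *
 * In outline, the resulting pattern (this is exactly what
 * page_demote_vp_pages() below does):
 *
 *	mtx = page_szc_lock(pp);
 *	if (mtx != NULL) {
 *		hat_page_demote(pp);
 *		mutex_exit(mtx);
 *	}
 *	ASSERT(pp->p_szc == 0);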
53397c478bd9Sstevel@tonic-gate */ 53407c478bd9Sstevel@tonic-gate static void 53417c478bd9Sstevel@tonic-gate page_demote_vp_pages(page_t *pp) 53427c478bd9Sstevel@tonic-gate { 53437c478bd9Sstevel@tonic-gate kmutex_t *mtx; 53447c478bd9Sstevel@tonic-gate 53457c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 53467c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 53477c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode != NULL); 53487c478bd9Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5349ad23a2dbSjohansen ASSERT(!PP_ISKAS(pp)); 53507c478bd9Sstevel@tonic-gate 53517c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 53527c478bd9Sstevel@tonic-gate 53537c478bd9Sstevel@tonic-gate mtx = page_szc_lock(pp); 53547c478bd9Sstevel@tonic-gate if (mtx != NULL) { 53557c478bd9Sstevel@tonic-gate hat_page_demote(pp); 53567c478bd9Sstevel@tonic-gate mutex_exit(mtx); 53577c478bd9Sstevel@tonic-gate } 53587c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0); 53597c478bd9Sstevel@tonic-gate } 53607c478bd9Sstevel@tonic-gate 53617c478bd9Sstevel@tonic-gate /* 53627c478bd9Sstevel@tonic-gate * Mark any existing pages for migration in the given range 53637c478bd9Sstevel@tonic-gate */ 53647c478bd9Sstevel@tonic-gate void 53657c478bd9Sstevel@tonic-gate page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 53667c478bd9Sstevel@tonic-gate struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 53677c478bd9Sstevel@tonic-gate u_offset_t vnoff, int rflag) 53687c478bd9Sstevel@tonic-gate { 53697c478bd9Sstevel@tonic-gate struct anon *ap; 53707c478bd9Sstevel@tonic-gate vnode_t *curvp; 53717c478bd9Sstevel@tonic-gate lgrp_t *from; 53727c478bd9Sstevel@tonic-gate pgcnt_t nlocked; 53737c478bd9Sstevel@tonic-gate u_offset_t off; 53747c478bd9Sstevel@tonic-gate pfn_t pfn; 53757c478bd9Sstevel@tonic-gate size_t pgsz; 53767c478bd9Sstevel@tonic-gate size_t segpgsz; 53777c478bd9Sstevel@tonic-gate pgcnt_t pages; 53787c478bd9Sstevel@tonic-gate uint_t pszc; 53795c16be9bSDonghai Qiao page_t *pp0, *pp; 53807c478bd9Sstevel@tonic-gate caddr_t va; 53817c478bd9Sstevel@tonic-gate ulong_t an_idx; 53827c478bd9Sstevel@tonic-gate anon_sync_obj_t cookie; 53837c478bd9Sstevel@tonic-gate 5384*dc32d872SJosef 'Jeff' Sipek ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 53857c478bd9Sstevel@tonic-gate 53867c478bd9Sstevel@tonic-gate /* 53877c478bd9Sstevel@tonic-gate * Don't do anything if don't need to do lgroup optimizations 53887c478bd9Sstevel@tonic-gate * on this system 53897c478bd9Sstevel@tonic-gate */ 53907c478bd9Sstevel@tonic-gate if (!lgrp_optimizations()) 53917c478bd9Sstevel@tonic-gate return; 53927c478bd9Sstevel@tonic-gate 53937c478bd9Sstevel@tonic-gate /* 53947c478bd9Sstevel@tonic-gate * Align address and length to (potentially large) page boundary 53957c478bd9Sstevel@tonic-gate */ 53967c478bd9Sstevel@tonic-gate segpgsz = page_get_pagesize(seg->s_szc); 53977c478bd9Sstevel@tonic-gate addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 53987c478bd9Sstevel@tonic-gate if (rflag) 53997c478bd9Sstevel@tonic-gate len = P2ROUNDUP(len, segpgsz); 54007c478bd9Sstevel@tonic-gate 54017c478bd9Sstevel@tonic-gate /* 54027c478bd9Sstevel@tonic-gate * Do one (large) page at a time 54037c478bd9Sstevel@tonic-gate */ 54047c478bd9Sstevel@tonic-gate va = addr; 54057c478bd9Sstevel@tonic-gate while (va < addr + len) { 54067c478bd9Sstevel@tonic-gate /* 54077c478bd9Sstevel@tonic-gate * Lookup (root) page for vnode and offset corresponding to 54087c478bd9Sstevel@tonic-gate * this virtual address 54097c478bd9Sstevel@tonic-gate * Try anonmap first since there may be 
copy-on-write 54107c478bd9Sstevel@tonic-gate * pages, but initialize vnode pointer and offset using 54117c478bd9Sstevel@tonic-gate * vnode arguments just in case there isn't an amp. 54127c478bd9Sstevel@tonic-gate */ 54137c478bd9Sstevel@tonic-gate curvp = vp; 54147c478bd9Sstevel@tonic-gate off = vnoff + va - seg->s_base; 54157c478bd9Sstevel@tonic-gate if (amp) { 54167c478bd9Sstevel@tonic-gate ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 54177c478bd9Sstevel@tonic-gate an_idx = anon_index + seg_page(seg, va); 54187c478bd9Sstevel@tonic-gate anon_array_enter(amp, an_idx, &cookie); 54197c478bd9Sstevel@tonic-gate ap = anon_get_ptr(amp->ahp, an_idx); 54207c478bd9Sstevel@tonic-gate if (ap) 54217c478bd9Sstevel@tonic-gate swap_xlate(ap, &curvp, &off); 54227c478bd9Sstevel@tonic-gate anon_array_exit(&cookie); 54237c478bd9Sstevel@tonic-gate ANON_LOCK_EXIT(&->a_rwlock); 54247c478bd9Sstevel@tonic-gate } 54257c478bd9Sstevel@tonic-gate 54267c478bd9Sstevel@tonic-gate pp = NULL; 54277c478bd9Sstevel@tonic-gate if (curvp) 54287c478bd9Sstevel@tonic-gate pp = page_lookup(curvp, off, SE_SHARED); 54297c478bd9Sstevel@tonic-gate 54307c478bd9Sstevel@tonic-gate /* 54317c478bd9Sstevel@tonic-gate * If there isn't a page at this virtual address, 54327c478bd9Sstevel@tonic-gate * skip to next page 54337c478bd9Sstevel@tonic-gate */ 54347c478bd9Sstevel@tonic-gate if (pp == NULL) { 54357c478bd9Sstevel@tonic-gate va += PAGESIZE; 54367c478bd9Sstevel@tonic-gate continue; 54377c478bd9Sstevel@tonic-gate } 54387c478bd9Sstevel@tonic-gate 54397c478bd9Sstevel@tonic-gate /* 54407c478bd9Sstevel@tonic-gate * Figure out which lgroup this page is in for kstats 54417c478bd9Sstevel@tonic-gate */ 54427c478bd9Sstevel@tonic-gate pfn = page_pptonum(pp); 54437c478bd9Sstevel@tonic-gate from = lgrp_pfn_to_lgrp(pfn); 54447c478bd9Sstevel@tonic-gate 54457c478bd9Sstevel@tonic-gate /* 54467c478bd9Sstevel@tonic-gate * Get page size, and round up and skip to next page boundary 54477c478bd9Sstevel@tonic-gate * if unaligned address 54487c478bd9Sstevel@tonic-gate */ 54497c478bd9Sstevel@tonic-gate pszc = pp->p_szc; 54507c478bd9Sstevel@tonic-gate pgsz = page_get_pagesize(pszc); 54517c478bd9Sstevel@tonic-gate pages = btop(pgsz); 54527c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(va, pgsz) || 54537c478bd9Sstevel@tonic-gate !IS_P2ALIGNED(pfn, pages) || 54547c478bd9Sstevel@tonic-gate pgsz > segpgsz) { 54557c478bd9Sstevel@tonic-gate pgsz = MIN(pgsz, segpgsz); 54567c478bd9Sstevel@tonic-gate page_unlock(pp); 54575c16be9bSDonghai Qiao pages = btop(P2END((uintptr_t)va, pgsz) - 54587c478bd9Sstevel@tonic-gate (uintptr_t)va); 54597c478bd9Sstevel@tonic-gate va = (caddr_t)P2END((uintptr_t)va, pgsz); 54605c16be9bSDonghai Qiao lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages); 54617c478bd9Sstevel@tonic-gate continue; 54627c478bd9Sstevel@tonic-gate } 54637c478bd9Sstevel@tonic-gate 54647c478bd9Sstevel@tonic-gate /* 54657c478bd9Sstevel@tonic-gate * Upgrade to exclusive lock on page 54667c478bd9Sstevel@tonic-gate */ 54677c478bd9Sstevel@tonic-gate if (!page_tryupgrade(pp)) { 54687c478bd9Sstevel@tonic-gate page_unlock(pp); 54697c478bd9Sstevel@tonic-gate va += pgsz; 54707c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 54717c478bd9Sstevel@tonic-gate btop(pgsz)); 54727c478bd9Sstevel@tonic-gate continue; 54737c478bd9Sstevel@tonic-gate } 54747c478bd9Sstevel@tonic-gate 54755c16be9bSDonghai Qiao pp0 = pp++; 54767c478bd9Sstevel@tonic-gate nlocked = 1; 54777c478bd9Sstevel@tonic-gate 54787c478bd9Sstevel@tonic-gate /* 54797c478bd9Sstevel@tonic-gate * Lock constituent 
pages if this is large page 54807c478bd9Sstevel@tonic-gate */ 54817c478bd9Sstevel@tonic-gate if (pages > 1) { 54827c478bd9Sstevel@tonic-gate /* 54837c478bd9Sstevel@tonic-gate * Lock all constituents except root page, since it 54847c478bd9Sstevel@tonic-gate * should be locked already. 54857c478bd9Sstevel@tonic-gate */ 54865c16be9bSDonghai Qiao for (; nlocked < pages; nlocked++) { 54877c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) { 54887c478bd9Sstevel@tonic-gate break; 54897c478bd9Sstevel@tonic-gate } 54907c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp) || 54917c478bd9Sstevel@tonic-gate pp->p_szc != pszc) { 54927c478bd9Sstevel@tonic-gate /* 54937c478bd9Sstevel@tonic-gate * hat_page_demote() raced in with us. 54947c478bd9Sstevel@tonic-gate */ 54957c478bd9Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(curvp)); 54967c478bd9Sstevel@tonic-gate page_unlock(pp); 54977c478bd9Sstevel@tonic-gate break; 54987c478bd9Sstevel@tonic-gate } 54995c16be9bSDonghai Qiao pp++; 55007c478bd9Sstevel@tonic-gate } 55017c478bd9Sstevel@tonic-gate } 55027c478bd9Sstevel@tonic-gate 55037c478bd9Sstevel@tonic-gate /* 55047c478bd9Sstevel@tonic-gate * If all constituent pages couldn't be locked, 55057c478bd9Sstevel@tonic-gate * unlock pages locked so far and skip to next page. 55067c478bd9Sstevel@tonic-gate */ 55075c16be9bSDonghai Qiao if (nlocked < pages) { 55085c16be9bSDonghai Qiao while (pp0 < pp) { 55095c16be9bSDonghai Qiao page_unlock(pp0++); 55105c16be9bSDonghai Qiao } 55117c478bd9Sstevel@tonic-gate va += pgsz; 55127c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 55137c478bd9Sstevel@tonic-gate btop(pgsz)); 55147c478bd9Sstevel@tonic-gate continue; 55157c478bd9Sstevel@tonic-gate } 55167c478bd9Sstevel@tonic-gate 55177c478bd9Sstevel@tonic-gate /* 55187c478bd9Sstevel@tonic-gate * hat_page_demote() can no longer happen 55197c478bd9Sstevel@tonic-gate * since last cons page had the right p_szc after 55207c478bd9Sstevel@tonic-gate * all cons pages were locked. all cons pages 55217c478bd9Sstevel@tonic-gate * should now have the same p_szc. 
55227c478bd9Sstevel@tonic-gate */ 55237c478bd9Sstevel@tonic-gate 55247c478bd9Sstevel@tonic-gate /* 55257c478bd9Sstevel@tonic-gate * All constituent pages locked successfully, so mark 55267c478bd9Sstevel@tonic-gate * large page for migration and unload the mappings of 55277c478bd9Sstevel@tonic-gate * constituent pages, so a fault will occur on any part of the 55287c478bd9Sstevel@tonic-gate * large page 55297c478bd9Sstevel@tonic-gate */ 55305c16be9bSDonghai Qiao PP_SETMIGRATE(pp0); 55315c16be9bSDonghai Qiao while (pp0 < pp) { 55325c16be9bSDonghai Qiao (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD); 55335c16be9bSDonghai Qiao ASSERT(hat_page_getshare(pp0) == 0); 55345c16be9bSDonghai Qiao page_unlock(pp0++); 55357c478bd9Sstevel@tonic-gate } 55367c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 55377c478bd9Sstevel@tonic-gate 55387c478bd9Sstevel@tonic-gate va += pgsz; 55397c478bd9Sstevel@tonic-gate } 55407c478bd9Sstevel@tonic-gate } 55417c478bd9Sstevel@tonic-gate 55427c478bd9Sstevel@tonic-gate /* 55437c478bd9Sstevel@tonic-gate * Migrate any pages that have been marked for migration in the given range 55447c478bd9Sstevel@tonic-gate */ 55457c478bd9Sstevel@tonic-gate void 55467c478bd9Sstevel@tonic-gate page_migrate( 55477c478bd9Sstevel@tonic-gate struct seg *seg, 55487c478bd9Sstevel@tonic-gate caddr_t addr, 55497c478bd9Sstevel@tonic-gate page_t **ppa, 55507c478bd9Sstevel@tonic-gate pgcnt_t npages) 55517c478bd9Sstevel@tonic-gate { 55527c478bd9Sstevel@tonic-gate lgrp_t *from; 55537c478bd9Sstevel@tonic-gate lgrp_t *to; 55547c478bd9Sstevel@tonic-gate page_t *newpp; 55557c478bd9Sstevel@tonic-gate page_t *pp; 55567c478bd9Sstevel@tonic-gate pfn_t pfn; 55577c478bd9Sstevel@tonic-gate size_t pgsz; 55587c478bd9Sstevel@tonic-gate spgcnt_t page_cnt; 55597c478bd9Sstevel@tonic-gate spgcnt_t i; 55607c478bd9Sstevel@tonic-gate uint_t pszc; 55617c478bd9Sstevel@tonic-gate 5562*dc32d872SJosef 'Jeff' Sipek ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 55637c478bd9Sstevel@tonic-gate 55647c478bd9Sstevel@tonic-gate while (npages > 0) { 55657c478bd9Sstevel@tonic-gate pp = *ppa; 55667c478bd9Sstevel@tonic-gate pszc = pp->p_szc; 55677c478bd9Sstevel@tonic-gate pgsz = page_get_pagesize(pszc); 55687c478bd9Sstevel@tonic-gate page_cnt = btop(pgsz); 55697c478bd9Sstevel@tonic-gate 55707c478bd9Sstevel@tonic-gate /* 55717c478bd9Sstevel@tonic-gate * Check to see whether this page is marked for migration 55727c478bd9Sstevel@tonic-gate * 55737c478bd9Sstevel@tonic-gate * Assume that root page of large page is marked for 55747c478bd9Sstevel@tonic-gate * migration and none of the other constituent pages 55757c478bd9Sstevel@tonic-gate * are marked. This really simplifies clearing the 55767c478bd9Sstevel@tonic-gate * migrate bit by not having to clear it from each 55777c478bd9Sstevel@tonic-gate * constituent page. 55787c478bd9Sstevel@tonic-gate * 55797c478bd9Sstevel@tonic-gate * note we don't want to relocate an entire large page if 55807c478bd9Sstevel@tonic-gate * someone is only using one subpage. 55817c478bd9Sstevel@tonic-gate */ 55827c478bd9Sstevel@tonic-gate if (npages < page_cnt) 55837c478bd9Sstevel@tonic-gate break; 55847c478bd9Sstevel@tonic-gate 55857c478bd9Sstevel@tonic-gate /* 55867c478bd9Sstevel@tonic-gate * Is it marked for migration? 
55877c478bd9Sstevel@tonic-gate */ 55887c478bd9Sstevel@tonic-gate if (!PP_ISMIGRATE(pp)) 55897c478bd9Sstevel@tonic-gate goto next; 55907c478bd9Sstevel@tonic-gate 55917c478bd9Sstevel@tonic-gate /* 55927c478bd9Sstevel@tonic-gate * Determine lgroups that the page is being migrated between 55937c478bd9Sstevel@tonic-gate */ 55947c478bd9Sstevel@tonic-gate pfn = page_pptonum(pp); 55957c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, page_cnt)) { 55967c478bd9Sstevel@tonic-gate break; 55977c478bd9Sstevel@tonic-gate } 55987c478bd9Sstevel@tonic-gate from = lgrp_pfn_to_lgrp(pfn); 55997c478bd9Sstevel@tonic-gate to = lgrp_mem_choose(seg, addr, pgsz); 56007c478bd9Sstevel@tonic-gate 56017c478bd9Sstevel@tonic-gate /* 56027c478bd9Sstevel@tonic-gate * Need to get exclusive locks to migrate 56037c478bd9Sstevel@tonic-gate */ 56047c478bd9Sstevel@tonic-gate for (i = 0; i < page_cnt; i++) { 56057c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[i])); 56067c478bd9Sstevel@tonic-gate if (page_pptonum(ppa[i]) != pfn + i || 56077c478bd9Sstevel@tonic-gate ppa[i]->p_szc != pszc) { 56087c478bd9Sstevel@tonic-gate break; 56097c478bd9Sstevel@tonic-gate } 56107c478bd9Sstevel@tonic-gate if (!page_tryupgrade(ppa[i])) { 56117c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, 56127c478bd9Sstevel@tonic-gate LGRP_PM_FAIL_LOCK_PGS, 56137c478bd9Sstevel@tonic-gate page_cnt); 56147c478bd9Sstevel@tonic-gate break; 56157c478bd9Sstevel@tonic-gate } 56166bc16138Sjj209869 56176bc16138Sjj209869 /* 56186bc16138Sjj209869 * Check to see whether we are trying to migrate 56196bc16138Sjj209869 * the page to the lgroup where it is already allocated. 56206bc16138Sjj209869 * If so, clear the migrate bit and skip to the next 56216bc16138Sjj209869 * page. 56226bc16138Sjj209869 */ 56236bc16138Sjj209869 if (i == 0 && to == from) { 56246bc16138Sjj209869 PP_CLRMIGRATE(ppa[0]); 56256bc16138Sjj209869 page_downgrade(ppa[0]); 56266bc16138Sjj209869 goto next; 56277c478bd9Sstevel@tonic-gate } 56286bc16138Sjj209869 } 56296bc16138Sjj209869 56306bc16138Sjj209869 /* 56316bc16138Sjj209869 * If all constituent pages couldn't be locked, 56326bc16138Sjj209869 * unlock pages locked so far and skip to next page.
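 * Note that the constituent pages in ppa[] arrived shared locked
 * (see the PAGE_LOCKED asserts above) and are upgraded one at a
 * time; on failure, i indexes the first constituent that failed the
 * pfn/szc check or the upgrade, so the code below page_downgrade()s
 * entries i-1 .. 0 back to their original shared locks rather than
 * dropping them, and the pages remain locked for the caller.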
56336bc16138Sjj209869 */ 56347c478bd9Sstevel@tonic-gate if (i != page_cnt) { 56357c478bd9Sstevel@tonic-gate while (--i != -1) { 56367c478bd9Sstevel@tonic-gate page_downgrade(ppa[i]); 56377c478bd9Sstevel@tonic-gate } 56387c478bd9Sstevel@tonic-gate goto next; 56397c478bd9Sstevel@tonic-gate } 56407c478bd9Sstevel@tonic-gate 56417c478bd9Sstevel@tonic-gate (void) page_create_wait(page_cnt, PG_WAIT); 56427c478bd9Sstevel@tonic-gate newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 56437c478bd9Sstevel@tonic-gate if (newpp == NULL) { 56447c478bd9Sstevel@tonic-gate page_create_putback(page_cnt); 56457c478bd9Sstevel@tonic-gate for (i = 0; i < page_cnt; i++) { 56467c478bd9Sstevel@tonic-gate page_downgrade(ppa[i]); 56477c478bd9Sstevel@tonic-gate } 56487c478bd9Sstevel@tonic-gate lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 56497c478bd9Sstevel@tonic-gate page_cnt); 56507c478bd9Sstevel@tonic-gate goto next; 56517c478bd9Sstevel@tonic-gate } 56527c478bd9Sstevel@tonic-gate ASSERT(newpp->p_szc == pszc); 56537c478bd9Sstevel@tonic-gate /* 56547c478bd9Sstevel@tonic-gate * Clear migrate bit and relocate page 56557c478bd9Sstevel@tonic-gate */ 56567c478bd9Sstevel@tonic-gate PP_CLRMIGRATE(pp); 56577c478bd9Sstevel@tonic-gate if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 56587c478bd9Sstevel@tonic-gate panic("page_migrate: page_relocate failed"); 56597c478bd9Sstevel@tonic-gate } 56607c478bd9Sstevel@tonic-gate ASSERT(page_cnt * PAGESIZE == pgsz); 56617c478bd9Sstevel@tonic-gate 56627c478bd9Sstevel@tonic-gate /* 56637c478bd9Sstevel@tonic-gate * Keep stats for number of pages migrated from and to 56647c478bd9Sstevel@tonic-gate * each lgroup 56657c478bd9Sstevel@tonic-gate */ 56667c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 56677c478bd9Sstevel@tonic-gate lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 56687c478bd9Sstevel@tonic-gate /* 56697c478bd9Sstevel@tonic-gate * update the page_t array we were passed in and 56707c478bd9Sstevel@tonic-gate * unlink constituent pages of a large page. 56717c478bd9Sstevel@tonic-gate */ 56727c478bd9Sstevel@tonic-gate for (i = 0; i < page_cnt; ++i, ++pp) { 56737c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(newpp)); 56747c478bd9Sstevel@tonic-gate ASSERT(newpp->p_szc == pszc); 56757c478bd9Sstevel@tonic-gate ppa[i] = newpp; 56767c478bd9Sstevel@tonic-gate pp = newpp; 56777c478bd9Sstevel@tonic-gate page_sub(&newpp, pp); 56787c478bd9Sstevel@tonic-gate page_downgrade(pp); 56797c478bd9Sstevel@tonic-gate } 56807c478bd9Sstevel@tonic-gate ASSERT(newpp == NULL); 56817c478bd9Sstevel@tonic-gate next: 56827c478bd9Sstevel@tonic-gate addr += pgsz; 56837c478bd9Sstevel@tonic-gate ppa += page_cnt; 56847c478bd9Sstevel@tonic-gate npages -= page_cnt; 56857c478bd9Sstevel@tonic-gate } 56867c478bd9Sstevel@tonic-gate } 56877c478bd9Sstevel@tonic-gate 56883cff2f43Sstans #define MAX_CNT 60 /* max num of iterations */ 56893cff2f43Sstans /* 56903cff2f43Sstans * Reclaim/reserve availrmem for npages. 56913cff2f43Sstans * If there is not enough memory start reaping seg, kmem caches. 56923cff2f43Sstans * Start pageout scanner (via page_needfree()). 56933cff2f43Sstans * Exit after ~ MAX_CNT s regardless of how much memory has been released. 56943cff2f43Sstans * Note: There is no guarantee that any availrmem will be freed as 56953cff2f43Sstans * this memory typically is locked (kernel heap) or reserved for swap. 
56963cff2f43Sstans * Also, due to memory fragmentation the kmem allocator may not be able 56973cff2f43Sstans * to free any memory (a single user-allocated buffer can prevent 56983cff2f43Sstans * a slab or a page from being freed). 56993cff2f43Sstans */ 57003cff2f43Sstans int 57013cff2f43Sstans page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 57023cff2f43Sstans { 57033cff2f43Sstans int i = 0; 57043cff2f43Sstans int ret = 0; 57053cff2f43Sstans pgcnt_t deficit; 57063cff2f43Sstans pgcnt_t old_availrmem; 57073cff2f43Sstans 57083cff2f43Sstans mutex_enter(&freemem_lock); 57093cff2f43Sstans old_availrmem = availrmem - 1; 57103cff2f43Sstans while ((availrmem < tune.t_minarmem + npages + epages) && 57113cff2f43Sstans (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 57123cff2f43Sstans old_availrmem = availrmem; 57133cff2f43Sstans deficit = tune.t_minarmem + npages + epages - availrmem; 57143cff2f43Sstans mutex_exit(&freemem_lock); 57153cff2f43Sstans page_needfree(deficit); 57163cff2f43Sstans kmem_reap(); 57173cff2f43Sstans delay(hz); 57183cff2f43Sstans page_needfree(-(spgcnt_t)deficit); 57193cff2f43Sstans mutex_enter(&freemem_lock); 57203cff2f43Sstans } 57213cff2f43Sstans 57223cff2f43Sstans if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 57233cff2f43Sstans availrmem -= npages; 57243cff2f43Sstans ret = 1; 57253cff2f43Sstans } 57263cff2f43Sstans 57273cff2f43Sstans mutex_exit(&freemem_lock); 57283cff2f43Sstans 57293cff2f43Sstans return (ret); 57303cff2f43Sstans } 57317c478bd9Sstevel@tonic-gate 57327c478bd9Sstevel@tonic-gate /* 57337c478bd9Sstevel@tonic-gate * Search the memory segments to locate the desired page. Within a 57347c478bd9Sstevel@tonic-gate * segment, pages increase linearly with one page structure per 57357c478bd9Sstevel@tonic-gate * physical page frame (size PAGESIZE). The search begins 57367c478bd9Sstevel@tonic-gate * with the segment that was accessed last, to take advantage of locality. 57377c478bd9Sstevel@tonic-gate * If the hint misses, we start from the beginning of the sorted memseg list. 57387c478bd9Sstevel@tonic-gate */ 57397c478bd9Sstevel@tonic-gate 57407c478bd9Sstevel@tonic-gate 57417c478bd9Sstevel@tonic-gate /* 57427c478bd9Sstevel@tonic-gate * Some data structures for pfn to pp lookup. 57437c478bd9Sstevel@tonic-gate */ 57447c478bd9Sstevel@tonic-gate ulong_t mhash_per_slot; 57457c478bd9Sstevel@tonic-gate struct memseg *memseg_hash[N_MEM_SLOTS]; 57467c478bd9Sstevel@tonic-gate 57477c478bd9Sstevel@tonic-gate page_t * 57487c478bd9Sstevel@tonic-gate page_numtopp_nolock(pfn_t pfnum) 57497c478bd9Sstevel@tonic-gate { 57507c478bd9Sstevel@tonic-gate struct memseg *seg; 57517c478bd9Sstevel@tonic-gate page_t *pp; 57522af6eb52SMichael Corcoran vm_cpu_data_t *vc; 57537c478bd9Sstevel@tonic-gate 57542af6eb52SMichael Corcoran /* 57552af6eb52SMichael Corcoran * We need to disable kernel preemption while referencing the 57562af6eb52SMichael Corcoran * cpu_vm_data field in order to prevent us from being switched to 57572af6eb52SMichael Corcoran * another cpu and trying to reference it after it has been freed. 57582af6eb52SMichael Corcoran * This will keep us on cpu and prevent it from being removed while 57592af6eb52SMichael Corcoran * we are still on it. 57609853d9e8SJason Beloro * 57619853d9e8SJason Beloro * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 57629853d9e8SJason Beloro * which is being reused by DR, which will flush those references 57639853d9e8SJason Beloro * before modifying the reused memseg. See memseg_cpu_vm_flush().
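 *
 * The lookup itself then proceeds in three stages: (1) the per-cpu
 * cached memseg from the last successful lookup (vc_pnum_memseg),
 * (2) the memseg_hash[] slot for this pfn, and (3) a brute force
 * walk of the global memsegs list as a last resort.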
57642af6eb52SMichael Corcoran */ 57652af6eb52SMichael Corcoran kpreempt_disable(); 57662af6eb52SMichael Corcoran vc = CPU->cpu_vm_data; 5767affbd3ccSkchow ASSERT(vc != NULL); 57687c478bd9Sstevel@tonic-gate 57697c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nsearch); 57707c478bd9Sstevel@tonic-gate 57717c478bd9Sstevel@tonic-gate /* Try last winner first */ 5772affbd3ccSkchow if (((seg = vc->vc_pnum_memseg) != NULL) && 57737c478bd9Sstevel@tonic-gate (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 57747c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nlastwon); 57757c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 57762af6eb52SMichael Corcoran if (pp->p_pagenum == pfnum) { 57772af6eb52SMichael Corcoran kpreempt_enable(); 57787c478bd9Sstevel@tonic-gate return ((page_t *)pp); 57797c478bd9Sstevel@tonic-gate } 57802af6eb52SMichael Corcoran } 57817c478bd9Sstevel@tonic-gate 57827c478bd9Sstevel@tonic-gate /* Else Try hash */ 57837c478bd9Sstevel@tonic-gate if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 57847c478bd9Sstevel@tonic-gate (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 57857c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nhashwon); 5786affbd3ccSkchow vc->vc_pnum_memseg = seg; 57877c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 57882af6eb52SMichael Corcoran if (pp->p_pagenum == pfnum) { 57892af6eb52SMichael Corcoran kpreempt_enable(); 57907c478bd9Sstevel@tonic-gate return ((page_t *)pp); 57917c478bd9Sstevel@tonic-gate } 57922af6eb52SMichael Corcoran } 57937c478bd9Sstevel@tonic-gate 57947c478bd9Sstevel@tonic-gate /* Else Brute force */ 57957c478bd9Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) { 57967c478bd9Sstevel@tonic-gate if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5797affbd3ccSkchow vc->vc_pnum_memseg = seg; 57987c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 57999853d9e8SJason Beloro if (pp->p_pagenum == pfnum) { 58002af6eb52SMichael Corcoran kpreempt_enable(); 58017c478bd9Sstevel@tonic-gate return ((page_t *)pp); 58027c478bd9Sstevel@tonic-gate } 58037c478bd9Sstevel@tonic-gate } 58049853d9e8SJason Beloro } 5805affbd3ccSkchow vc->vc_pnum_memseg = NULL; 58062af6eb52SMichael Corcoran kpreempt_enable(); 58077c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nnotfound); 58087c478bd9Sstevel@tonic-gate return ((page_t *)NULL); 58097c478bd9Sstevel@tonic-gate 58107c478bd9Sstevel@tonic-gate } 58117c478bd9Sstevel@tonic-gate 58127c478bd9Sstevel@tonic-gate struct memseg * 58137c478bd9Sstevel@tonic-gate page_numtomemseg_nolock(pfn_t pfnum) 58147c478bd9Sstevel@tonic-gate { 58157c478bd9Sstevel@tonic-gate struct memseg *seg; 58167c478bd9Sstevel@tonic-gate page_t *pp; 58177c478bd9Sstevel@tonic-gate 58189853d9e8SJason Beloro /* 58199853d9e8SJason Beloro * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 58209853d9e8SJason Beloro * which is being reused by DR, which will flush those references 58219853d9e8SJason Beloro * before modifying the reused memseg. See memseg_cpu_vm_flush().
58229853d9e8SJason Beloro */ 58239853d9e8SJason Beloro kpreempt_disable(); 58247c478bd9Sstevel@tonic-gate /* Try hash */ 58257c478bd9Sstevel@tonic-gate if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 58267c478bd9Sstevel@tonic-gate (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 58277c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base); 58289853d9e8SJason Beloro if (pp->p_pagenum == pfnum) { 58299853d9e8SJason Beloro kpreempt_enable(); 58307c478bd9Sstevel@tonic-gate return (seg); 58317c478bd9Sstevel@tonic-gate } 58329853d9e8SJason Beloro } 58337c478bd9Sstevel@tonic-gate 58347c478bd9Sstevel@tonic-gate /* Else Brute force */ 58357c478bd9Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) { 58367c478bd9Sstevel@tonic-gate if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 58379853d9e8SJason Beloro pp = seg->pages + (pfnum - seg->pages_base); 58389853d9e8SJason Beloro if (pp->p_pagenum == pfnum) { 58399853d9e8SJason Beloro kpreempt_enable(); 58407c478bd9Sstevel@tonic-gate return (seg); 58417c478bd9Sstevel@tonic-gate } 58427c478bd9Sstevel@tonic-gate } 58439853d9e8SJason Beloro } 58449853d9e8SJason Beloro kpreempt_enable(); 58457c478bd9Sstevel@tonic-gate return ((struct memseg *)NULL); 58467c478bd9Sstevel@tonic-gate } 58477c478bd9Sstevel@tonic-gate 58487c478bd9Sstevel@tonic-gate /* 58497c478bd9Sstevel@tonic-gate * Given a page and a count, return the page struct that is 58507c478bd9Sstevel@tonic-gate * n structs away from the current one in the global page 58517c478bd9Sstevel@tonic-gate * list. 58527c478bd9Sstevel@tonic-gate * 58537c478bd9Sstevel@tonic-gate * This function wraps to the first page upon 58547c478bd9Sstevel@tonic-gate * reaching the end of the memseg list. 58557c478bd9Sstevel@tonic-gate */ 58567c478bd9Sstevel@tonic-gate page_t * 58577c478bd9Sstevel@tonic-gate page_nextn(page_t *pp, ulong_t n) 58587c478bd9Sstevel@tonic-gate { 58597c478bd9Sstevel@tonic-gate struct memseg *seg; 58607c478bd9Sstevel@tonic-gate page_t *ppn; 58612af6eb52SMichael Corcoran vm_cpu_data_t *vc; 58622af6eb52SMichael Corcoran 58632af6eb52SMichael Corcoran /* 58642af6eb52SMichael Corcoran * We need to disable kernel preemption while referencing the 58652af6eb52SMichael Corcoran * cpu_vm_data field in order to prevent us from being switched to 58662af6eb52SMichael Corcoran * another cpu and trying to reference it after it has been freed. 58672af6eb52SMichael Corcoran * This will keep us on cpu and prevent it from being removed while 58682af6eb52SMichael Corcoran * we are still on it. 58699853d9e8SJason Beloro * 58709853d9e8SJason Beloro * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 58719853d9e8SJason Beloro * which is being reused by DR, which will flush those references 58729853d9e8SJason Beloro * before modifying the reused memseg. See memseg_cpu_vm_flush().
58732af6eb52SMichael Corcoran */ 58742af6eb52SMichael Corcoran kpreempt_disable(); 58752af6eb52SMichael Corcoran vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 58767c478bd9Sstevel@tonic-gate 5877affbd3ccSkchow ASSERT(vc != NULL); 5878affbd3ccSkchow 5879affbd3ccSkchow if (((seg = vc->vc_pnext_memseg) == NULL) || 58807c478bd9Sstevel@tonic-gate (seg->pages_base == seg->pages_end) || 58817c478bd9Sstevel@tonic-gate !(pp >= seg->pages && pp < seg->epages)) { 58827c478bd9Sstevel@tonic-gate 58837c478bd9Sstevel@tonic-gate for (seg = memsegs; seg; seg = seg->next) { 58847c478bd9Sstevel@tonic-gate if (pp >= seg->pages && pp < seg->epages) 58857c478bd9Sstevel@tonic-gate break; 58867c478bd9Sstevel@tonic-gate } 58877c478bd9Sstevel@tonic-gate 58887c478bd9Sstevel@tonic-gate if (seg == NULL) { 58897c478bd9Sstevel@tonic-gate /* Memory delete got in, return something valid. */ 58907c478bd9Sstevel@tonic-gate /* TODO: fix me. */ 58917c478bd9Sstevel@tonic-gate seg = memsegs; 58927c478bd9Sstevel@tonic-gate pp = seg->pages; 58937c478bd9Sstevel@tonic-gate } 58947c478bd9Sstevel@tonic-gate } 58957c478bd9Sstevel@tonic-gate 58967c478bd9Sstevel@tonic-gate /* check for wraparound - possible if n is large */ 58977c478bd9Sstevel@tonic-gate while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 58987c478bd9Sstevel@tonic-gate n -= seg->epages - pp; 58997c478bd9Sstevel@tonic-gate seg = seg->next; 59007c478bd9Sstevel@tonic-gate if (seg == NULL) 59017c478bd9Sstevel@tonic-gate seg = memsegs; 59027c478bd9Sstevel@tonic-gate pp = seg->pages; 59037c478bd9Sstevel@tonic-gate } 5904affbd3ccSkchow vc->vc_pnext_memseg = seg; 59052af6eb52SMichael Corcoran kpreempt_enable(); 59067c478bd9Sstevel@tonic-gate return (ppn); 59077c478bd9Sstevel@tonic-gate } 59087c478bd9Sstevel@tonic-gate 59097c478bd9Sstevel@tonic-gate /* 59107c478bd9Sstevel@tonic-gate * Initialize for a loop using page_next_scan_large(). 59117c478bd9Sstevel@tonic-gate */ 59127c478bd9Sstevel@tonic-gate page_t * 59137c478bd9Sstevel@tonic-gate page_next_scan_init(void **cookie) 59147c478bd9Sstevel@tonic-gate { 59157c478bd9Sstevel@tonic-gate ASSERT(cookie != NULL); 59167c478bd9Sstevel@tonic-gate *cookie = (void *)memsegs; 59177c478bd9Sstevel@tonic-gate return ((page_t *)memsegs->pages); 59187c478bd9Sstevel@tonic-gate } 59197c478bd9Sstevel@tonic-gate 59207c478bd9Sstevel@tonic-gate /* 59217c478bd9Sstevel@tonic-gate * Return the next page in a scan of page_t's, assuming we want 59227c478bd9Sstevel@tonic-gate * to skip over sub-pages within larger page sizes. 59237c478bd9Sstevel@tonic-gate * 59247c478bd9Sstevel@tonic-gate * The cookie is used to keep track of the current memseg. 
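 *
 * An illustrative caller (a sketch, not code from this file; the
 * inspect() helper is hypothetical) that visits each large page
 * root exactly once looks like:
 *
 *	void *cookie;
 *	ulong_t n = 0;
 *	page_t *pp = page_next_scan_init(&cookie);
 *
 *	while (n < total_pages) {
 *		inspect(pp);
 *		pp = page_next_scan_large(pp, &n, &cookie);
 *	}
 *
 * Since *n is advanced by the number of constituent page_t's
 * skipped on each call, the loop above makes exactly one pass over
 * physical memory (assuming no concurrent memory add/delete).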
59257c478bd9Sstevel@tonic-gate */ 59267c478bd9Sstevel@tonic-gate page_t * 59277c478bd9Sstevel@tonic-gate page_next_scan_large( 59287c478bd9Sstevel@tonic-gate page_t *pp, 59297c478bd9Sstevel@tonic-gate ulong_t *n, 59307c478bd9Sstevel@tonic-gate void **cookie) 59317c478bd9Sstevel@tonic-gate { 59327c478bd9Sstevel@tonic-gate struct memseg *seg = (struct memseg *)*cookie; 59337c478bd9Sstevel@tonic-gate page_t *new_pp; 59347c478bd9Sstevel@tonic-gate ulong_t cnt; 59357c478bd9Sstevel@tonic-gate pfn_t pfn; 59367c478bd9Sstevel@tonic-gate 59377c478bd9Sstevel@tonic-gate 59387c478bd9Sstevel@tonic-gate /* 59397c478bd9Sstevel@tonic-gate * get the count of page_t's to skip based on the page size 59407c478bd9Sstevel@tonic-gate */ 59417c478bd9Sstevel@tonic-gate ASSERT(pp != NULL); 59427c478bd9Sstevel@tonic-gate if (pp->p_szc == 0) { 59437c478bd9Sstevel@tonic-gate cnt = 1; 59447c478bd9Sstevel@tonic-gate } else { 59457c478bd9Sstevel@tonic-gate pfn = page_pptonum(pp); 59467c478bd9Sstevel@tonic-gate cnt = page_get_pagecnt(pp->p_szc); 59477c478bd9Sstevel@tonic-gate cnt -= pfn & (cnt - 1); 59487c478bd9Sstevel@tonic-gate } 59497c478bd9Sstevel@tonic-gate *n += cnt; 59507c478bd9Sstevel@tonic-gate new_pp = pp + cnt; 59517c478bd9Sstevel@tonic-gate 59527c478bd9Sstevel@tonic-gate /* 59537c478bd9Sstevel@tonic-gate * Catch if we went past the end of the current memory segment. If so, 59547c478bd9Sstevel@tonic-gate * just move to the next segment with pages. 59557c478bd9Sstevel@tonic-gate */ 59569853d9e8SJason Beloro if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) { 59577c478bd9Sstevel@tonic-gate do { 59587c478bd9Sstevel@tonic-gate seg = seg->next; 59597c478bd9Sstevel@tonic-gate if (seg == NULL) 59607c478bd9Sstevel@tonic-gate seg = memsegs; 59619853d9e8SJason Beloro } while (seg->pages_base == seg->pages_end); 59627c478bd9Sstevel@tonic-gate new_pp = seg->pages; 59637c478bd9Sstevel@tonic-gate *cookie = (void *)seg; 59647c478bd9Sstevel@tonic-gate } 59657c478bd9Sstevel@tonic-gate 59667c478bd9Sstevel@tonic-gate return (new_pp); 59677c478bd9Sstevel@tonic-gate } 59687c478bd9Sstevel@tonic-gate 59697c478bd9Sstevel@tonic-gate 59707c478bd9Sstevel@tonic-gate /* 59717c478bd9Sstevel@tonic-gate * Returns next page in list. Note: this function wraps 59727c478bd9Sstevel@tonic-gate * to the first page in the list upon reaching the end 59737c478bd9Sstevel@tonic-gate * of the list. Callers should be aware of this fact. 59747c478bd9Sstevel@tonic-gate */ 59757c478bd9Sstevel@tonic-gate 59767c478bd9Sstevel@tonic-gate /* We should change this to be a #define */ 59777c478bd9Sstevel@tonic-gate 59787c478bd9Sstevel@tonic-gate page_t * 59797c478bd9Sstevel@tonic-gate page_next(page_t *pp) 59807c478bd9Sstevel@tonic-gate { 59817c478bd9Sstevel@tonic-gate return (page_nextn(pp, 1)); 59827c478bd9Sstevel@tonic-gate } 59837c478bd9Sstevel@tonic-gate 59847c478bd9Sstevel@tonic-gate page_t * 59857c478bd9Sstevel@tonic-gate page_first() 59867c478bd9Sstevel@tonic-gate { 59877c478bd9Sstevel@tonic-gate return ((page_t *)memsegs->pages); 59887c478bd9Sstevel@tonic-gate } 59897c478bd9Sstevel@tonic-gate 59907c478bd9Sstevel@tonic-gate 59917c478bd9Sstevel@tonic-gate /* 59927c478bd9Sstevel@tonic-gate * This routine is called at boot with the initial memory configuration 59937c478bd9Sstevel@tonic-gate * and when memory is added or removed.
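 *
 * Each memseg_hash[] slot covers a fixed range of mhash_per_slot
 * pfns, where mhash_per_slot is computed below as
 * (physmax + 1) >> MEM_HASH_SHIFT. As a purely illustrative example
 * (these constants are hypothetical): with physmax + 1 == 0x100000
 * pfns and MEM_HASH_SHIFT == 6, each slot would cover 0x4000 pfns.
 * A slot points at the memseg with the lowest pages_base among
 * those overlapping the slot's pfn range.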
59947c478bd9Sstevel@tonic-gate */ 59957c478bd9Sstevel@tonic-gate void 59967c478bd9Sstevel@tonic-gate build_pfn_hash() 59977c478bd9Sstevel@tonic-gate { 59987c478bd9Sstevel@tonic-gate pfn_t cur; 59997c478bd9Sstevel@tonic-gate pgcnt_t index; 60007c478bd9Sstevel@tonic-gate struct memseg *pseg; 60017c478bd9Sstevel@tonic-gate int i; 60027c478bd9Sstevel@tonic-gate 60037c478bd9Sstevel@tonic-gate /* 60047c478bd9Sstevel@tonic-gate * Clear memseg_hash array. 60057c478bd9Sstevel@tonic-gate * Since memory add/delete is designed to operate concurrently 60067c478bd9Sstevel@tonic-gate * with normal operation, the hash rebuild must be able to run 60077c478bd9Sstevel@tonic-gate * concurrently with page_numtopp_nolock(). To support this 60087c478bd9Sstevel@tonic-gate * functionality, assignments to memseg_hash array members must 60097c478bd9Sstevel@tonic-gate * be done atomically. 60107c478bd9Sstevel@tonic-gate * 60117c478bd9Sstevel@tonic-gate * NOTE: bzero() does not currently guarantee this for kernel 60127c478bd9Sstevel@tonic-gate * threads, and cannot be used here. 60137c478bd9Sstevel@tonic-gate */ 60147c478bd9Sstevel@tonic-gate for (i = 0; i < N_MEM_SLOTS; i++) 60157c478bd9Sstevel@tonic-gate memseg_hash[i] = NULL; 60167c478bd9Sstevel@tonic-gate 60177c478bd9Sstevel@tonic-gate hat_kpm_mseghash_clear(N_MEM_SLOTS); 60187c478bd9Sstevel@tonic-gate 60197c478bd9Sstevel@tonic-gate /* 60207c478bd9Sstevel@tonic-gate * Physmax is the last valid pfn. 60217c478bd9Sstevel@tonic-gate */ 60227c478bd9Sstevel@tonic-gate mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 60237c478bd9Sstevel@tonic-gate for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 60247c478bd9Sstevel@tonic-gate index = MEMSEG_PFN_HASH(pseg->pages_base); 60257c478bd9Sstevel@tonic-gate cur = pseg->pages_base; 60267c478bd9Sstevel@tonic-gate do { 60277c478bd9Sstevel@tonic-gate if (index >= N_MEM_SLOTS) 60287c478bd9Sstevel@tonic-gate index = MEMSEG_PFN_HASH(cur); 60297c478bd9Sstevel@tonic-gate 60307c478bd9Sstevel@tonic-gate if (memseg_hash[index] == NULL || 60317c478bd9Sstevel@tonic-gate memseg_hash[index]->pages_base > pseg->pages_base) { 60327c478bd9Sstevel@tonic-gate memseg_hash[index] = pseg; 60337c478bd9Sstevel@tonic-gate hat_kpm_mseghash_update(index, pseg); 60347c478bd9Sstevel@tonic-gate } 60357c478bd9Sstevel@tonic-gate cur += mhash_per_slot; 60367c478bd9Sstevel@tonic-gate index++; 60377c478bd9Sstevel@tonic-gate } while (cur < pseg->pages_end); 60387c478bd9Sstevel@tonic-gate } 60397c478bd9Sstevel@tonic-gate } 60407c478bd9Sstevel@tonic-gate 60417c478bd9Sstevel@tonic-gate /* 60427c478bd9Sstevel@tonic-gate * Return the pagenum for the pp 60437c478bd9Sstevel@tonic-gate */ 60447c478bd9Sstevel@tonic-gate pfn_t 60457c478bd9Sstevel@tonic-gate page_pptonum(page_t *pp) 60467c478bd9Sstevel@tonic-gate { 60477c478bd9Sstevel@tonic-gate return (pp->p_pagenum); 60487c478bd9Sstevel@tonic-gate } 60497c478bd9Sstevel@tonic-gate 60507c478bd9Sstevel@tonic-gate /* 60517c478bd9Sstevel@tonic-gate * interface to the referenced and modified etc bits 60527c478bd9Sstevel@tonic-gate * in the PSM part of the page struct 60537c478bd9Sstevel@tonic-gate * when no locking is desired. 
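 *
 * For example, page_set_props(pp, P_REF | P_MOD) ORs both bits into
 * pp->p_nrm, and page_clr_all_props(pp) clears all of the property
 * bits; only P_MOD, P_REF and P_RO may be set this way (see the
 * ASSERT below).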
60547c478bd9Sstevel@tonic-gate */ 60557c478bd9Sstevel@tonic-gate void 60567c478bd9Sstevel@tonic-gate page_set_props(page_t *pp, uint_t flags) 60577c478bd9Sstevel@tonic-gate { 60587c478bd9Sstevel@tonic-gate ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 60597c478bd9Sstevel@tonic-gate pp->p_nrm |= (uchar_t)flags; 60607c478bd9Sstevel@tonic-gate } 60617c478bd9Sstevel@tonic-gate 60627c478bd9Sstevel@tonic-gate void 60639d0d62adSJason Beloro page_clr_all_props(page_t *pp) 60647c478bd9Sstevel@tonic-gate { 60657c478bd9Sstevel@tonic-gate pp->p_nrm = 0; 60667c478bd9Sstevel@tonic-gate } 60677c478bd9Sstevel@tonic-gate 60687c478bd9Sstevel@tonic-gate /* 6069db874c57Selowe * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 6070db874c57Selowe */ 6071db874c57Selowe int 6072db874c57Selowe page_clear_lck_cow(page_t *pp, int adjust) 6073db874c57Selowe { 6074db874c57Selowe int f_amount; 6075db874c57Selowe 6076db874c57Selowe ASSERT(PAGE_EXCL(pp)); 6077db874c57Selowe 6078db874c57Selowe /* 6079db874c57Selowe * The page_struct_lock need not be acquired here since 6080db874c57Selowe * we require the caller hold the page exclusively locked. 6081db874c57Selowe */ 6082db874c57Selowe f_amount = 0; 6083db874c57Selowe if (pp->p_lckcnt) { 6084db874c57Selowe f_amount = 1; 6085db874c57Selowe pp->p_lckcnt = 0; 6086db874c57Selowe } 6087db874c57Selowe if (pp->p_cowcnt) { 6088db874c57Selowe f_amount += pp->p_cowcnt; 6089db874c57Selowe pp->p_cowcnt = 0; 6090db874c57Selowe } 6091db874c57Selowe 6092db874c57Selowe if (adjust && f_amount) { 6093db874c57Selowe mutex_enter(&freemem_lock); 6094db874c57Selowe availrmem += f_amount; 6095db874c57Selowe mutex_exit(&freemem_lock); 6096db874c57Selowe } 6097db874c57Selowe 6098db874c57Selowe return (f_amount); 6099db874c57Selowe } 6100db874c57Selowe 6101db874c57Selowe /* 61027c478bd9Sstevel@tonic-gate * The following function is called from free_vp_pages() 61037c478bd9Sstevel@tonic-gate * for an inexact estimate of a newly free'd page... 61047c478bd9Sstevel@tonic-gate */ 61057c478bd9Sstevel@tonic-gate ulong_t 61067c478bd9Sstevel@tonic-gate page_share_cnt(page_t *pp) 61077c478bd9Sstevel@tonic-gate { 61087c478bd9Sstevel@tonic-gate return (hat_page_getshare(pp)); 61097c478bd9Sstevel@tonic-gate } 61107c478bd9Sstevel@tonic-gate 61117c478bd9Sstevel@tonic-gate int 61127c478bd9Sstevel@tonic-gate page_isshared(page_t *pp) 61137c478bd9Sstevel@tonic-gate { 611405d3dc4bSpaulsan return (hat_page_checkshare(pp, 1)); 61157c478bd9Sstevel@tonic-gate } 61167c478bd9Sstevel@tonic-gate 61177c478bd9Sstevel@tonic-gate int 61187c478bd9Sstevel@tonic-gate page_isfree(page_t *pp) 61197c478bd9Sstevel@tonic-gate { 61207c478bd9Sstevel@tonic-gate return (PP_ISFREE(pp)); 61217c478bd9Sstevel@tonic-gate } 61227c478bd9Sstevel@tonic-gate 61237c478bd9Sstevel@tonic-gate int 61247c478bd9Sstevel@tonic-gate page_isref(page_t *pp) 61257c478bd9Sstevel@tonic-gate { 61267c478bd9Sstevel@tonic-gate return (hat_page_getattr(pp, P_REF)); 61277c478bd9Sstevel@tonic-gate } 61287c478bd9Sstevel@tonic-gate 61297c478bd9Sstevel@tonic-gate int 61307c478bd9Sstevel@tonic-gate page_ismod(page_t *pp) 61317c478bd9Sstevel@tonic-gate { 61327c478bd9Sstevel@tonic-gate return (hat_page_getattr(pp, P_MOD)); 61337c478bd9Sstevel@tonic-gate } 61348b464eb8Smec 61358b464eb8Smec /* 61368b464eb8Smec * The following code all currently relates to the page capture logic: 61378b464eb8Smec * 61388b464eb8Smec * This logic is used for cases where there is a desire to claim a certain 61398b464eb8Smec * physical page in the system for the caller.
As it may not be possible 61408b464eb8Smec * to capture the page immediately, the p_toxic bits are used in the page 61418b464eb8Smec * structure to indicate that someone wants to capture this page. When the 61428b464eb8Smec * page gets unlocked, the toxic flag will be noted and an attempt to capture 61438b464eb8Smec * the page will be made. If it is successful, the original caller's callback 61448b464eb8Smec * will be called with the page to do with it what they please. 61458b464eb8Smec * 61468b464eb8Smec * There is also an async thread which wakes up occasionally to attempt 61478b464eb8Smec * to capture pages which have the capture bit set. All of the pages which 61488b464eb8Smec * need to be captured asynchronously have been inserted into the 61498b464eb8Smec * page_capture_hash and thus this thread walks that hash list. Items in the 61508b464eb8Smec * hash have an expiration time so this thread handles that as well by removing 61518b464eb8Smec * the item from the hash if it has expired. 61528b464eb8Smec * 61538b464eb8Smec * Some important things to note are: 61548b464eb8Smec * - if the PR_CAPTURE bit is set on a page, then the page is in the 61558b464eb8Smec * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 61568b464eb8Smec * to set and clear this bit, and it is only while the lock is held 61578b464eb8Smec * that an entry can be added to or removed from the hash. 61588b464eb8Smec * - the PR_CAPTURE bit can only be set and cleared while holding the 61598b464eb8Smec * page_capture_hash_head.pchh_mutex 61608b464eb8Smec * - the t_flag field of the thread struct is used with the T_CAPTURING 61618b464eb8Smec * flag to prevent recursion while dealing with large pages. 61628b464eb8Smec * - pages which need to be retired never expire on the page_capture_hash. 61638b464eb8Smec */ 61648b464eb8Smec 61658b464eb8Smec static void page_capture_thread(void); 61668b464eb8Smec static kthread_t *pc_thread_id; 61678b464eb8Smec kcondvar_t pc_cv; 61688b464eb8Smec static kmutex_t pc_thread_mutex; 61698b464eb8Smec static clock_t pc_thread_shortwait; 61708b464eb8Smec static clock_t pc_thread_longwait; 6171a98e9dbfSaguzovsk static int pc_thread_retry; 61728b464eb8Smec 61738b464eb8Smec struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 61748b464eb8Smec 61758b464eb8Smec /* Note that this is a circular linked list */ 61768b464eb8Smec typedef struct page_capture_hash_bucket { 61778b464eb8Smec page_t *pp; 617811494be0SStan Studzinski uchar_t szc; 617911494be0SStan Studzinski uchar_t pri; 61808b464eb8Smec uint_t flags; 61818b464eb8Smec clock_t expires; /* lbolt at which this request expires. */ 61828b464eb8Smec void *datap; /* Cached data passed in for callback */ 61838b464eb8Smec struct page_capture_hash_bucket *next; 61848b464eb8Smec struct page_capture_hash_bucket *prev; 61858b464eb8Smec } page_capture_hash_bucket_t; 61868b464eb8Smec 618711494be0SStan Studzinski #define PC_PRI_HI 0 /* capture now */ 618811494be0SStan Studzinski #define PC_PRI_LO 1 /* capture later */ 618911494be0SStan Studzinski #define PC_NUM_PRI 2 619011494be0SStan Studzinski 619111494be0SStan Studzinski #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI) 619211494be0SStan Studzinski 619311494be0SStan Studzinski 61948b464eb8Smec /* 61958b464eb8Smec * Each hash bucket will have its own mutex and two lists which are: 61968b464eb8Smec * active (0): represents requests which have not been processed by 61978b464eb8Smec * the page_capture async thread yet.
61988b464eb8Smec * walked (1): represents requests which have been processed by the 61998b464eb8Smec * page_capture async thread within its given walk of this bucket. 62008b464eb8Smec * 62018b464eb8Smec * These are all needed so that we can synchronize all async page_capture 62028b464eb8Smec * events. When the async thread moves to a new bucket, it will append the 62038b464eb8Smec * walked list to the active list and walk each item one at a time, moving it 62048b464eb8Smec * from the active list to the walked list. Thus if there is an async request 62058b464eb8Smec * outstanding for a given page, it will always be in one of the two lists. 62068b464eb8Smec * New requests will always be added to the active list. 62078b464eb8Smec * If we were not able to capture a page before the request expired, we'd free 62088b464eb8Smec * up the request structure which would indicate to page_capture that there is 62098b464eb8Smec * no longer a need for the given page, and clear the PR_CAPTURE flag if 62108b464eb8Smec * possible. 62118b464eb8Smec */ 62128b464eb8Smec typedef struct page_capture_hash_head { 62138b464eb8Smec kmutex_t pchh_mutex; 621411494be0SStan Studzinski uint_t num_pages[PC_NUM_PRI]; 62158b464eb8Smec page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 62168b464eb8Smec } page_capture_hash_head_t; 62178b464eb8Smec 62188b464eb8Smec #ifdef DEBUG 62198b464eb8Smec #define NUM_PAGE_CAPTURE_BUCKETS 4 62208b464eb8Smec #else 62218b464eb8Smec #define NUM_PAGE_CAPTURE_BUCKETS 64 62228b464eb8Smec #endif 62238b464eb8Smec 62248b464eb8Smec page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 62258b464eb8Smec 62268b464eb8Smec /* for now use a very simple hash based upon the size of a page struct */ 62278b464eb8Smec #define PAGE_CAPTURE_HASH(pp) \ 62288b464eb8Smec ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 62298b464eb8Smec 62308b464eb8Smec extern pgcnt_t swapfs_minfree; 62318b464eb8Smec 62328b464eb8Smec int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 62338b464eb8Smec 62348b464eb8Smec /* 62358b464eb8Smec * a callback function is required for page capture requests.
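 *
 * A registration sketch (illustrative only; the index MY_CB_INDEX
 * and the callback itself are hypothetical, not part of this file):
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		// pp arrives SE_EXCL locked and cleaned; on failure
 *		// the callback must release the lock, typically by
 *		// calling page_free().
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(MY_CB_INDEX, 60 * hz,
 *	    my_capture_cb);
 *
 * The duration argument is in lbolt ticks (see the expires
 * computation in page_capture_add_hash()); a duration of -1 means
 * requests for this consumer never expire.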
62368b464eb8Smec */ 62378b464eb8Smec void 62388b464eb8Smec page_capture_register_callback(uint_t index, clock_t duration, 62398b464eb8Smec int (*cb_func)(page_t *, void *, uint_t)) 62408b464eb8Smec { 62418b464eb8Smec ASSERT(pc_cb[index].cb_active == 0); 62428b464eb8Smec ASSERT(cb_func != NULL); 62438b464eb8Smec rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 62448b464eb8Smec pc_cb[index].duration = duration; 62458b464eb8Smec pc_cb[index].cb_func = cb_func; 62468b464eb8Smec pc_cb[index].cb_active = 1; 62478b464eb8Smec rw_exit(&pc_cb[index].cb_rwlock); 62488b464eb8Smec } 62498b464eb8Smec 62508b464eb8Smec void 62518b464eb8Smec page_capture_unregister_callback(uint_t index) 62528b464eb8Smec { 62538b464eb8Smec int i, j; 62548b464eb8Smec struct page_capture_hash_bucket *bp1; 62558b464eb8Smec struct page_capture_hash_bucket *bp2; 62568b464eb8Smec struct page_capture_hash_bucket *head = NULL; 62578b464eb8Smec uint_t flags = (1 << index); 62588b464eb8Smec 62598b464eb8Smec rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 62608b464eb8Smec ASSERT(pc_cb[index].cb_active == 1); 62618b464eb8Smec pc_cb[index].duration = 0; /* Paranoia */ 62628b464eb8Smec pc_cb[index].cb_func = NULL; /* Paranoia */ 62638b464eb8Smec pc_cb[index].cb_active = 0; 62648b464eb8Smec rw_exit(&pc_cb[index].cb_rwlock); 62658b464eb8Smec 62668b464eb8Smec /* 62678b464eb8Smec * Just move all the entries to a private list which we can walk 62688b464eb8Smec * through without the need to hold any locks. 62698b464eb8Smec * No more requests can get added to the hash lists for this consumer 62708b464eb8Smec * as the cb_active field for the callback has been cleared. 62718b464eb8Smec */ 62728b464eb8Smec for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 62738b464eb8Smec mutex_enter(&page_capture_hash[i].pchh_mutex); 62748b464eb8Smec for (j = 0; j < 2; j++) { 62758b464eb8Smec bp1 = page_capture_hash[i].lists[j].next; 62768b464eb8Smec /* walk through all but first (sentinel) element */ 62778b464eb8Smec while (bp1 != &page_capture_hash[i].lists[j]) { 62788b464eb8Smec bp2 = bp1; 62798b464eb8Smec if (bp2->flags & flags) { 62808b464eb8Smec bp1 = bp2->next; 62818b464eb8Smec bp1->prev = bp2->prev; 62828b464eb8Smec bp2->prev->next = bp1; 62838b464eb8Smec bp2->next = head; 62848b464eb8Smec head = bp2; 62858b464eb8Smec /* 62868b464eb8Smec * Clear the PR_CAPTURE bit as we 62878b464eb8Smec * hold appropriate locks here. 62888b464eb8Smec */ 62898b464eb8Smec page_clrtoxic(head->pp, PR_CAPTURE); 629011494be0SStan Studzinski page_capture_hash[i]. 629111494be0SStan Studzinski num_pages[bp2->pri]--; 62928b464eb8Smec continue; 62938b464eb8Smec } 62948b464eb8Smec bp1 = bp1->next; 62958b464eb8Smec } 62968b464eb8Smec } 62978b464eb8Smec mutex_exit(&page_capture_hash[i].pchh_mutex); 62988b464eb8Smec } 62998b464eb8Smec 63008b464eb8Smec while (head != NULL) { 63018b464eb8Smec bp1 = head; 63028b464eb8Smec head = head->next; 63038b464eb8Smec kmem_free(bp1, sizeof (*bp1)); 63048b464eb8Smec } 63058b464eb8Smec } 63068b464eb8Smec 63078b464eb8Smec 63088b464eb8Smec /* 63098b464eb8Smec * Find pp in the active list and move it to the walked list if it 63108b464eb8Smec * exists. 63118b464eb8Smec * Note that most often pp should be at the front of the active list 63128b464eb8Smec * as it is currently used and thus there is no other sort of optimization 63138b464eb8Smec * being done here as this is a linked list data structure. 63148b464eb8Smec * Returns 1 on successful move or 0 if page could not be found. 
63158b464eb8Smec */ 63168b464eb8Smec static int 63178b464eb8Smec page_capture_move_to_walked(page_t *pp) 63188b464eb8Smec { 63198b464eb8Smec page_capture_hash_bucket_t *bp; 63208b464eb8Smec int index; 63218b464eb8Smec 63228b464eb8Smec index = PAGE_CAPTURE_HASH(pp); 63238b464eb8Smec 63248b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex); 63258b464eb8Smec bp = page_capture_hash[index].lists[0].next; 63268b464eb8Smec while (bp != &page_capture_hash[index].lists[0]) { 63278b464eb8Smec if (bp->pp == pp) { 63288b464eb8Smec /* Remove from old list */ 63298b464eb8Smec bp->next->prev = bp->prev; 63308b464eb8Smec bp->prev->next = bp->next; 63318b464eb8Smec 63328b464eb8Smec /* Add to new list */ 63338b464eb8Smec bp->next = page_capture_hash[index].lists[1].next; 63348b464eb8Smec bp->prev = &page_capture_hash[index].lists[1]; 63358b464eb8Smec page_capture_hash[index].lists[1].next = bp; 63368b464eb8Smec bp->next->prev = bp; 63378b464eb8Smec 633811494be0SStan Studzinski /* 633911494be0SStan Studzinski * There is a small probability that a page on the 634011494be0SStan Studzinski * free list is retired while being allocated, 634111494be0SStan Studzinski * before P_RAF is set on it. Such a page may 634211494be0SStan Studzinski * end up marked as a high priority request instead 634311494be0SStan Studzinski * of a low priority one. 634411494be0SStan Studzinski * If a P_RAF page is not marked as a low priority 634511494be0SStan Studzinski * request, change it to one. 634611494be0SStan Studzinski */ 634711494be0SStan Studzinski page_capture_hash[index].num_pages[bp->pri]--; 634811494be0SStan Studzinski bp->pri = PAGE_CAPTURE_PRIO(pp); 634911494be0SStan Studzinski page_capture_hash[index].num_pages[bp->pri]++; 635011494be0SStan Studzinski mutex_exit(&page_capture_hash[index].pchh_mutex); 63518b464eb8Smec return (1); 63528b464eb8Smec } 63538b464eb8Smec bp = bp->next; 63548b464eb8Smec } 63558b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex); 63568b464eb8Smec return (0); 63578b464eb8Smec } 63588b464eb8Smec 63598b464eb8Smec /* 63608b464eb8Smec * Add a new entry to the page capture hash. The only case where a new 63618b464eb8Smec * entry is not added is when the page capture consumer is no longer registered. 63628b464eb8Smec * In this case, we'll silently not add the page to the hash. We know that 63638b464eb8Smec * page retire will always be registered for the case where we are currently 63648b464eb8Smec * unretiring a page and thus there are no conflicts.
63658b464eb8Smec */ 63668b464eb8Smec static void 63678b464eb8Smec page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 63688b464eb8Smec { 63698b464eb8Smec page_capture_hash_bucket_t *bp1; 63708b464eb8Smec page_capture_hash_bucket_t *bp2; 63718b464eb8Smec int index; 63728b464eb8Smec int cb_index; 63738b464eb8Smec int i; 637411494be0SStan Studzinski uchar_t pri; 63758b464eb8Smec #ifdef DEBUG 63768b464eb8Smec page_capture_hash_bucket_t *tp1; 63778b464eb8Smec int l; 63788b464eb8Smec #endif 63798b464eb8Smec 63808b464eb8Smec ASSERT(!(flags & CAPTURE_ASYNC)); 63818b464eb8Smec 63828b464eb8Smec bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 63838b464eb8Smec 63848b464eb8Smec bp1->pp = pp; 63858b464eb8Smec bp1->szc = szc; 63868b464eb8Smec bp1->flags = flags; 63878b464eb8Smec bp1->datap = datap; 63888b464eb8Smec 63898b464eb8Smec for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 63908b464eb8Smec if ((flags >> cb_index) & 1) { 63918b464eb8Smec break; 63928b464eb8Smec } 63938b464eb8Smec } 63948b464eb8Smec 63958b464eb8Smec ASSERT(cb_index != PC_NUM_CALLBACKS); 63968b464eb8Smec 63978b464eb8Smec rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 63988b464eb8Smec if (pc_cb[cb_index].cb_active) { 63998b464eb8Smec if (pc_cb[cb_index].duration == -1) { 64008b464eb8Smec bp1->expires = (clock_t)-1; 64018b464eb8Smec } else { 6402d3d50737SRafael Vanoni bp1->expires = ddi_get_lbolt() + 6403d3d50737SRafael Vanoni pc_cb[cb_index].duration; 64048b464eb8Smec } 64058b464eb8Smec } else { 64068b464eb8Smec /* There's no callback registered so don't add to the hash */ 64078b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock); 64088b464eb8Smec kmem_free(bp1, sizeof (*bp1)); 64098b464eb8Smec return; 64108b464eb8Smec } 64118b464eb8Smec 64128b464eb8Smec index = PAGE_CAPTURE_HASH(pp); 64138b464eb8Smec 64148b464eb8Smec /* 64158b464eb8Smec * Only allow capture flag to be modified under this mutex. 64168b464eb8Smec * Prevents multiple entries for same page getting added. 
64178b464eb8Smec */ 64188b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex); 64198b464eb8Smec 64208b464eb8Smec /* 64218b464eb8Smec * if not already on the hash, set capture bit and add to the hash 64228b464eb8Smec */ 64238b464eb8Smec if (!(pp->p_toxic & PR_CAPTURE)) { 64248b464eb8Smec #ifdef DEBUG 64258b464eb8Smec /* Check for duplicate entries */ 64268b464eb8Smec for (l = 0; l < 2; l++) { 64278b464eb8Smec tp1 = page_capture_hash[index].lists[l].next; 64288b464eb8Smec while (tp1 != &page_capture_hash[index].lists[l]) { 64298b464eb8Smec if (tp1->pp == pp) { 64308b464eb8Smec panic("page pp 0x%p already on hash " 64318793b36bSNick Todd "at 0x%p\n", 64328793b36bSNick Todd (void *)pp, (void *)tp1); 64338b464eb8Smec } 64348b464eb8Smec tp1 = tp1->next; 64358b464eb8Smec } 64368b464eb8Smec } 64378b464eb8Smec 64388b464eb8Smec #endif 64398b464eb8Smec page_settoxic(pp, PR_CAPTURE); 644011494be0SStan Studzinski pri = PAGE_CAPTURE_PRIO(pp); 644111494be0SStan Studzinski bp1->pri = pri; 64428b464eb8Smec bp1->next = page_capture_hash[index].lists[0].next; 64438b464eb8Smec bp1->prev = &page_capture_hash[index].lists[0]; 64448b464eb8Smec bp1->next->prev = bp1; 64458b464eb8Smec page_capture_hash[index].lists[0].next = bp1; 644611494be0SStan Studzinski page_capture_hash[index].num_pages[pri]++; 6447cee1d74bSjfrank if (flags & CAPTURE_RETIRE) { 6448704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States page_retire_incr_pend_count(datap); 6449cee1d74bSjfrank } 64508b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex); 64518b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock); 64528b464eb8Smec cv_signal(&pc_cv); 64538b464eb8Smec return; 64548b464eb8Smec } 64558b464eb8Smec 64568b464eb8Smec /* 64578b464eb8Smec * A page retire request will replace any other request. 64588b464eb8Smec * A second physmem request which is for a different process than 64598b464eb8Smec * the currently registered one will be dropped as there is 64608b464eb8Smec * no way to hold the private data for both calls. 64618b464eb8Smec * In the future, once there are more callers, this will have to 64628b464eb8Smec * be worked out better as there needs to be private storage for 64638b464eb8Smec * at least each type of caller (maybe have datap be an array of 64648b464eb8Smec * *void's so that we can index based upon callers index). 64658b464eb8Smec */ 64668b464eb8Smec 64678b464eb8Smec /* walk hash list to update expire time */ 64688b464eb8Smec for (i = 0; i < 2; i++) { 64698b464eb8Smec bp2 = page_capture_hash[index].lists[i].next; 64708b464eb8Smec while (bp2 != &page_capture_hash[index].lists[i]) { 64718b464eb8Smec if (bp2->pp == pp) { 64728b464eb8Smec if (flags & CAPTURE_RETIRE) { 64738b464eb8Smec if (!(bp2->flags & CAPTURE_RETIRE)) { 6474704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States page_retire_incr_pend_count( 6475704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States datap); 64768b464eb8Smec bp2->flags = flags; 64778b464eb8Smec bp2->expires = bp1->expires; 64788b464eb8Smec bp2->datap = datap; 64798b464eb8Smec } 64808b464eb8Smec } else { 64818b464eb8Smec ASSERT(flags & CAPTURE_PHYSMEM); 64828b464eb8Smec if (!(bp2->flags & CAPTURE_RETIRE) && 64838b464eb8Smec (datap == bp2->datap)) { 64848b464eb8Smec bp2->expires = bp1->expires; 64858b464eb8Smec } 64868b464eb8Smec } 64878b464eb8Smec mutex_exit(&page_capture_hash[index]. 
64888b464eb8Smec pchh_mutex); 64898b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock); 64908b464eb8Smec kmem_free(bp1, sizeof (*bp1)); 64918b464eb8Smec return; 64928b464eb8Smec } 64938b464eb8Smec bp2 = bp2->next; 64948b464eb8Smec } 64958b464eb8Smec } 64968b464eb8Smec 64978b464eb8Smec /* 64988b464eb8Smec * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 64998b464eb8Smec * and thus it either has to be set or not set and can't change 65008b464eb8Smec * while holding the mutex above. 65018b464eb8Smec */ 65028793b36bSNick Todd panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", 65038793b36bSNick Todd (void *)pp); 65048b464eb8Smec } 65058b464eb8Smec 65068b464eb8Smec /* 65078b464eb8Smec * We have a page in our hands, let's try to make it ours by turning 65088b464eb8Smec * it into a clean page as if it had just come off the freelists. 65098b464eb8Smec * 65108b464eb8Smec * Returns 0 on success, with the page still EXCL locked. 65118b464eb8Smec * On failure, the page will be unlocked and EAGAIN is returned. 65128b464eb8Smec */ 65138b464eb8Smec static int 65148b464eb8Smec page_capture_clean_page(page_t *pp) 65158b464eb8Smec { 65168b464eb8Smec page_t *newpp; 65178b464eb8Smec int skip_unlock = 0; 65188b464eb8Smec spgcnt_t count; 65198b464eb8Smec page_t *tpp; 65208b464eb8Smec int ret = 0; 65218b464eb8Smec int extra; 65228b464eb8Smec 65238b464eb8Smec ASSERT(PAGE_EXCL(pp)); 65248b464eb8Smec ASSERT(!PP_RETIRED(pp)); 65258b464eb8Smec ASSERT(curthread->t_flag & T_CAPTURING); 65268b464eb8Smec 65278b464eb8Smec if (PP_ISFREE(pp)) { 65286e4dd838Smec if (!page_reclaim(pp, NULL)) { 65298b464eb8Smec skip_unlock = 1; 65308b464eb8Smec ret = EAGAIN; 65318b464eb8Smec goto cleanup; 65328b464eb8Smec } 65336e4dd838Smec ASSERT(pp->p_szc == 0); 65348b464eb8Smec if (pp->p_vnode != NULL) { 65358b464eb8Smec /* 65368b464eb8Smec * Since this page came from the 65378b464eb8Smec * cachelist, we must destroy the 65388b464eb8Smec * old vnode association. 65398b464eb8Smec */ 65408b464eb8Smec page_hashout(pp, NULL); 65418b464eb8Smec } 65428b464eb8Smec goto cleanup; 65438b464eb8Smec } 65448b464eb8Smec 65458b464eb8Smec /* 65468b464eb8Smec * If we know page_relocate will fail, skip it. 65478b464eb8Smec * It could still fail due to a UE on another page but we 65488b464eb8Smec * can't do anything about that. 65498b464eb8Smec */ 65508b464eb8Smec if (pp->p_toxic & PR_UE) { 65518b464eb8Smec goto skip_relocate; 65528b464eb8Smec } 65538b464eb8Smec 65548b464eb8Smec /* 65558b464eb8Smec * It's possible that pages cannot have a vnode as fsflush comes 65568b464eb8Smec * through and cleans up these pages. It's ugly but that's how it is. 65578b464eb8Smec */ 65588b464eb8Smec if (pp->p_vnode == NULL) { 65598b464eb8Smec goto skip_relocate; 65608b464eb8Smec } 65618b464eb8Smec 65628b464eb8Smec /* 65638b464eb8Smec * Page was not free, so let's try to relocate it. 65648b464eb8Smec * page_relocate only works with root pages, so if this is not a root 65658b464eb8Smec * page, we need to demote it to try to relocate it. 65668b464eb8Smec * Unfortunately this is the best we can do right now.
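 * (Demoting via page_try_demote_pages() splits the large page into
 * szc 0 constituent pages, after which pp is trivially a root page
 * and page_relocate() can be attempted on it alone.)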
65678b464eb8Smec */ 65688b464eb8Smec newpp = NULL; 65698b464eb8Smec if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 65708b464eb8Smec if (page_try_demote_pages(pp) == 0) { 65718b464eb8Smec ret = EAGAIN; 65728b464eb8Smec goto cleanup; 65738b464eb8Smec } 65748b464eb8Smec } 65758b464eb8Smec ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 65768b464eb8Smec if (ret == 0) { 65778b464eb8Smec page_t *npp; 65788b464eb8Smec /* unlock the new page(s) */ 65798b464eb8Smec while (count-- > 0) { 65808b464eb8Smec ASSERT(newpp != NULL); 65818b464eb8Smec npp = newpp; 65828b464eb8Smec page_sub(&newpp, npp); 65838b464eb8Smec page_unlock(npp); 65848b464eb8Smec } 65858b464eb8Smec ASSERT(newpp == NULL); 65868b464eb8Smec /* 65878b464eb8Smec * Check to see if the page we have is too large. 65888b464eb8Smec * If so, demote it freeing up the extra pages. 65898b464eb8Smec */ 65908b464eb8Smec if (pp->p_szc > 0) { 65918b464eb8Smec /* For now demote extra pages to szc == 0 */ 65928b464eb8Smec extra = page_get_pagecnt(pp->p_szc) - 1; 65938b464eb8Smec while (extra > 0) { 65948b464eb8Smec tpp = pp->p_next; 65958b464eb8Smec page_sub(&pp, tpp); 65968b464eb8Smec tpp->p_szc = 0; 65978b464eb8Smec page_free(tpp, 1); 65988b464eb8Smec extra--; 65998b464eb8Smec } 66008b464eb8Smec /* Make sure to set our page to szc 0 as well */ 66018b464eb8Smec ASSERT(pp->p_next == pp && pp->p_prev == pp); 66028b464eb8Smec pp->p_szc = 0; 66038b464eb8Smec } 66048b464eb8Smec goto cleanup; 66058b464eb8Smec } else if (ret == EIO) { 66068b464eb8Smec ret = EAGAIN; 66078b464eb8Smec goto cleanup; 66088b464eb8Smec } else { 66098b464eb8Smec /* 66108b464eb8Smec * Need to reset return type as we failed to relocate the page 66118b464eb8Smec * but that does not mean that some of the next steps will not 66128b464eb8Smec * work. 66138b464eb8Smec */ 66148b464eb8Smec ret = 0; 66158b464eb8Smec } 66168b464eb8Smec 66178b464eb8Smec skip_relocate: 66188b464eb8Smec 66198b464eb8Smec if (pp->p_szc > 0) { 66208b464eb8Smec if (page_try_demote_pages(pp) == 0) { 66218b464eb8Smec ret = EAGAIN; 66228b464eb8Smec goto cleanup; 66238b464eb8Smec } 66248b464eb8Smec } 66258b464eb8Smec 66268b464eb8Smec ASSERT(pp->p_szc == 0); 66278b464eb8Smec 66288b464eb8Smec if (hat_ismod(pp)) { 66298b464eb8Smec ret = EAGAIN; 66308b464eb8Smec goto cleanup; 66318b464eb8Smec } 6632ad23a2dbSjohansen if (PP_ISKAS(pp)) { 66338b464eb8Smec ret = EAGAIN; 66348b464eb8Smec goto cleanup; 66358b464eb8Smec } 66368b464eb8Smec if (pp->p_lckcnt || pp->p_cowcnt) { 66378b464eb8Smec ret = EAGAIN; 66388b464eb8Smec goto cleanup; 66398b464eb8Smec } 66408b464eb8Smec 66418b464eb8Smec (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 66428b464eb8Smec ASSERT(!hat_page_is_mapped(pp)); 66438b464eb8Smec 66448b464eb8Smec if (hat_ismod(pp)) { 66458b464eb8Smec /* 66468b464eb8Smec * This is a semi-odd case as the page is now modified but not 66478b464eb8Smec * mapped as we just unloaded the mappings above. 66488b464eb8Smec */ 66498b464eb8Smec ret = EAGAIN; 66508b464eb8Smec goto cleanup; 66518b464eb8Smec } 66528b464eb8Smec if (pp->p_vnode != NULL) { 66538b464eb8Smec page_hashout(pp, NULL); 66548b464eb8Smec } 66558b464eb8Smec 66568b464eb8Smec /* 66578b464eb8Smec * At this point, the page should be in a clean state and 66588b464eb8Smec * we can do whatever we want with it. 
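 * ("Clean" here means: szc 0, no hat mappings, no vnode identity,
 * not modified and with no lock/COW counts, and still held SE_EXCL,
 * as the checks above and the asserts in the success path below
 * verify.)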
66598b464eb8Smec */ 66608b464eb8Smec 66618b464eb8Smec cleanup: 66628b464eb8Smec if (ret != 0) { 66638b464eb8Smec if (!skip_unlock) { 66648b464eb8Smec page_unlock(pp); 66658b464eb8Smec } 66668b464eb8Smec } else { 66678b464eb8Smec ASSERT(pp->p_szc == 0); 66688b464eb8Smec ASSERT(PAGE_EXCL(pp)); 66698b464eb8Smec 66708b464eb8Smec pp->p_next = pp; 66718b464eb8Smec pp->p_prev = pp; 66728b464eb8Smec } 66738b464eb8Smec return (ret); 66748b464eb8Smec } 66758b464eb8Smec 66768b464eb8Smec /* 66778b464eb8Smec * Various callers of page_trycapture() can have different restrictions upon 66788b464eb8Smec * what memory they have access to. 66798b464eb8Smec * Returns 0 on success, with the following error codes on failure: 66808b464eb8Smec * EPERM - The requested page is long term locked, and thus repeated 66818b464eb8Smec * requests to capture this page will likely fail. 66828b464eb8Smec * ENOMEM - There was not enough free memory in the system to safely 66838b464eb8Smec * map the requested page. 66848b464eb8Smec * ENOENT - The requested page was inside the kernel cage, and the 66858b464eb8Smec * PHYSMEM_CAGE flag was not set. 66868b464eb8Smec */ 66878b464eb8Smec int 66888b464eb8Smec page_capture_pre_checks(page_t *pp, uint_t flags) 66898b464eb8Smec { 66908b464eb8Smec ASSERT(pp != NULL); 66918b464eb8Smec 66928b464eb8Smec #if defined(__sparc) 6693af4c679fSSean McEnroe if (pp->p_vnode == &promvp) { 66948b464eb8Smec return (EPERM); 66958b464eb8Smec } 66968b464eb8Smec 6697a98e9dbfSaguzovsk if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6698a98e9dbfSaguzovsk (flags & CAPTURE_PHYSMEM)) { 66998b464eb8Smec return (ENOENT); 67008b464eb8Smec } 67018b464eb8Smec 67028b464eb8Smec if (PP_ISNORELOCKERNEL(pp)) { 67038b464eb8Smec return (EPERM); 67048b464eb8Smec } 67058b464eb8Smec #else 6706ad23a2dbSjohansen if (PP_ISKAS(pp)) { 67078b464eb8Smec return (EPERM); 67088b464eb8Smec } 67098b464eb8Smec #endif /* __sparc */ 67108b464eb8Smec 6711a98e9dbfSaguzovsk /* only physmem currently has the restrictions checked below */ 6712a98e9dbfSaguzovsk if (!(flags & CAPTURE_PHYSMEM)) { 6713a98e9dbfSaguzovsk return (0); 6714a98e9dbfSaguzovsk } 6715a98e9dbfSaguzovsk 67168b464eb8Smec if (availrmem < swapfs_minfree) { 67178b464eb8Smec /* 67188b464eb8Smec * We won't try to capture this page as we are 67198b464eb8Smec * running low on memory. 67208b464eb8Smec */ 67218b464eb8Smec return (ENOMEM); 67228b464eb8Smec } 67238b464eb8Smec return (0); 67248b464eb8Smec } 67258b464eb8Smec 67268b464eb8Smec /* 67278b464eb8Smec * Once we have a page in our mitts, go ahead and complete the capture 67288b464eb8Smec * operation. 67298b464eb8Smec * Returns 1 on failure where the page is no longer needed. 67308b464eb8Smec * Returns 0 on success. 67318b464eb8Smec * Returns -1 if there was a transient failure. 67328b464eb8Smec * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
67338b464eb8Smec */ 67348b464eb8Smec int 67358b464eb8Smec page_capture_take_action(page_t *pp, uint_t flags, void *datap) 67368b464eb8Smec { 67378b464eb8Smec int cb_index; 67388b464eb8Smec int ret = 0; 67398b464eb8Smec page_capture_hash_bucket_t *bp1; 67408b464eb8Smec page_capture_hash_bucket_t *bp2; 67418b464eb8Smec int index; 67428b464eb8Smec int found = 0; 67438b464eb8Smec int i; 67448b464eb8Smec 67458b464eb8Smec ASSERT(PAGE_EXCL(pp)); 67468b464eb8Smec ASSERT(curthread->t_flag & T_CAPTURING); 67478b464eb8Smec 67488b464eb8Smec for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 67498b464eb8Smec if ((flags >> cb_index) & 1) { 67508b464eb8Smec break; 67518b464eb8Smec } 67528b464eb8Smec } 67538b464eb8Smec ASSERT(cb_index < PC_NUM_CALLBACKS); 67548b464eb8Smec 67558b464eb8Smec /* 67568b464eb8Smec * Remove the entry from the page_capture hash, but don't free it yet 67578b464eb8Smec * as we may need to put it back. 67588b464eb8Smec * Since we own the page at this point in time, we should find it 67598b464eb8Smec * in the hash if this is an ASYNC call. If we don't it's likely 67608b464eb8Smec * that the page_capture_async() thread decided that this request 67618b464eb8Smec * had expired, in which case we just continue on. 67628b464eb8Smec */ 67638b464eb8Smec if (flags & CAPTURE_ASYNC) { 67648b464eb8Smec 67658b464eb8Smec index = PAGE_CAPTURE_HASH(pp); 67668b464eb8Smec 67678b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex); 67688b464eb8Smec for (i = 0; i < 2 && !found; i++) { 67698b464eb8Smec bp1 = page_capture_hash[index].lists[i].next; 67708b464eb8Smec while (bp1 != &page_capture_hash[index].lists[i]) { 67718b464eb8Smec if (bp1->pp == pp) { 67728b464eb8Smec bp1->next->prev = bp1->prev; 67738b464eb8Smec bp1->prev->next = bp1->next; 677411494be0SStan Studzinski page_capture_hash[index]. 677511494be0SStan Studzinski num_pages[bp1->pri]--; 67768b464eb8Smec page_clrtoxic(pp, PR_CAPTURE); 67778b464eb8Smec found = 1; 67788b464eb8Smec break; 67798b464eb8Smec } 67808b464eb8Smec bp1 = bp1->next; 67818b464eb8Smec } 67828b464eb8Smec } 67838b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex); 67848b464eb8Smec } 67858b464eb8Smec 67868b464eb8Smec /* Synchronize with the unregister func. */ 67878b464eb8Smec rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 67888b464eb8Smec if (!pc_cb[cb_index].cb_active) { 67898b464eb8Smec page_free(pp, 1); 67908b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock); 67918b464eb8Smec if (found) { 67928b464eb8Smec kmem_free(bp1, sizeof (*bp1)); 67938b464eb8Smec } 67948b464eb8Smec return (1); 67958b464eb8Smec } 67968b464eb8Smec 67978b464eb8Smec /* 67988b464eb8Smec * We need to remove the entry from the page capture hash and turn off 67998b464eb8Smec * the PR_CAPTURE bit before calling the callback. We'll need to cache 68008b464eb8Smec * the entry here, and then based upon the return value, cleanup 68018b464eb8Smec * appropriately or re-add it to the hash, making sure that someone else 68028b464eb8Smec * hasn't already done so. 68038b464eb8Smec * It should be rare for the callback to fail and thus it's ok for 68048b464eb8Smec * the failure path to be a bit complicated as the success path is 68058b464eb8Smec * cleaner and the locking rules are easier to follow. 

	ret = pc_cb[cb_index].cb_func(pp, datap, flags);

	rw_exit(&pc_cb[cb_index].cb_rwlock);

	/*
	 * If this was an ASYNC request, we need to cleanup the hash if the
	 * callback was successful or if the request was no longer valid.
	 * For non-ASYNC requests, we return failure to map and the caller
	 * will take care of adding the request to the hash.
	 * Note also that the callback itself is responsible for the page
	 * at this point in time in terms of locking ...  The most common
	 * case for the failure path should just be a page_free.
	 */
	if (ret >= 0) {
		if (found) {
			if (bp1->flags & CAPTURE_RETIRE) {
				page_retire_decr_pend_count(datap);
			}
			kmem_free(bp1, sizeof (*bp1));
		}
		return (ret);
	}
	if (!found) {
		return (ret);
	}

	ASSERT(flags & CAPTURE_ASYNC);

	/*
	 * Check for expiration time first as we can just free it up if it's
	 * expired.
	 */
	if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
		kmem_free(bp1, sizeof (*bp1));
		return (ret);
	}

	/*
	 * The callback failed and there used to be an entry in the hash for
	 * this page, so we need to add it back to the hash.
	 */
	mutex_enter(&page_capture_hash[index].pchh_mutex);
	if (!(pp->p_toxic & PR_CAPTURE)) {
		/* just add bp1 back to head of walked list */
		page_settoxic(pp, PR_CAPTURE);
		bp1->next = page_capture_hash[index].lists[1].next;
		bp1->prev = &page_capture_hash[index].lists[1];
		bp1->next->prev = bp1;
		bp1->pri = PAGE_CAPTURE_PRIO(pp);
		page_capture_hash[index].lists[1].next = bp1;
		page_capture_hash[index].num_pages[bp1->pri]++;
		mutex_exit(&page_capture_hash[index].pchh_mutex);
		return (ret);
	}

	/*
	 * Otherwise there was a new capture request added to the list.
	 * Need to make sure that our original data is represented if
	 * appropriate.
	 */
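	/*
	 * Merge rule for the scan below: if the newer hash entry (bp2) is
	 * itself a CAPTURE_RETIRE request it is left untouched; otherwise
	 * it is overwritten with our cached data (bp1), whether bp1 was a
	 * retire or a physmem request.  The priority counters are then
	 * updated to match the page's current priority.
	 */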
	for (i = 0; i < 2; i++) {
		bp2 = page_capture_hash[index].lists[i].next;
		while (bp2 != &page_capture_hash[index].lists[i]) {
			if (bp2->pp == pp) {
				if (bp1->flags & CAPTURE_RETIRE) {
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						bp2->szc = bp1->szc;
						bp2->flags = bp1->flags;
						bp2->expires = bp1->expires;
						bp2->datap = bp1->datap;
					}
				} else {
					ASSERT(bp1->flags & CAPTURE_PHYSMEM);
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						bp2->szc = bp1->szc;
						bp2->flags = bp1->flags;
						bp2->expires = bp1->expires;
						bp2->datap = bp1->datap;
					}
				}
				page_capture_hash[index].num_pages[bp2->pri]--;
				bp2->pri = PAGE_CAPTURE_PRIO(pp);
				page_capture_hash[index].num_pages[bp2->pri]++;
				mutex_exit(&page_capture_hash[index].
				    pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				return (ret);
			}
			bp2 = bp2->next;
		}
	}
	panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
	/*NOTREACHED*/
}
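/*
 * Illustrative sketch (under #if 0, not compiled): the shape of a capture
 * callback as implied by page_capture_take_action() above.  The callback
 * owns pp SE_EXCL locked on entry; the names example_cb and
 * example_consume_page() are hypothetical.
 */
#if 0
static int
example_cb(page_t *pp, void *datap, uint_t flags)
{
	ASSERT(PAGE_EXCL(pp));

	if (!example_consume_page(pp, datap)) {
		/* failure paths must release the SE_EXCL lock */
		page_free(pp, 1);
		return (-1);	/* transient failure; may be retried */
	}
	return (0);		/* success; the callback keeps the page */
}
#endif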
/*
 * Try to capture the given page for the caller specified in the flags
 * parameter.  The page will either be captured and handed over to the
 * appropriate callback, or will be queued up in the page capture hash
 * to be captured asynchronously.
 * If the current request is due to an async capture, the page must be
 * exclusively locked before calling this function.
 * Currently szc must be 0 but in the future this should be expandable to
 * other page sizes.
 * Returns 0 on success, with the following error codes on failure:
 *   EPERM - The requested page is long term locked, and thus repeated
 *   requests to capture this page will likely fail.
 *   ENOMEM - There was not enough free memory in the system to safely
 *   map the requested page.
 *   ENOENT - The requested page was inside the kernel cage, and the
 *   CAPTURE_GET_CAGE flag was not set.
 *   EAGAIN - The requested page could not be captured at this point in
 *   time but future requests will likely work.
 *   EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag
 *   was not set.
 */
int
page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;
	int cb_index;

	if (flags & CAPTURE_ASYNC) {
		ASSERT(PAGE_EXCL(pp));
		goto async;
	}

	/* Make sure there's enough availrmem ... */
	ret = page_capture_pre_checks(pp, flags);
	if (ret != 0) {
		return (ret);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
			if ((flags >> cb_index) & 1) {
				break;
			}
		}
		ASSERT(cb_index < PC_NUM_CALLBACKS);
		ret = EAGAIN;
		/* Special case for retired pages */
		if (PP_RETIRED(pp)) {
			if (flags & CAPTURE_GET_RETIRED) {
				if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
					/*
					 * Need to set capture bit and add to
					 * hash so that the page will be
					 * retired when freed.
					 */
					page_capture_add_hash(pp, szc,
					    CAPTURE_RETIRE, NULL);
					ret = 0;
					goto own_page;
				}
			} else {
				return (EBUSY);
			}
		}
		page_capture_add_hash(pp, szc, flags, datap);
		return (ret);
	}

async:
	ASSERT(PAGE_EXCL(pp));

	/* Need to check for physmem async requests that availrmem is sane */
	if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
	    (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
	    (availrmem < swapfs_minfree)) {
		page_unlock(pp);
		return (ENOMEM);
	}

	ret = page_capture_clean_page(pp);

	if (ret != 0) {
		/* We failed to get the page, so let's add it to the hash */
		if (!(flags & CAPTURE_ASYNC)) {
			page_capture_add_hash(pp, szc, flags, datap);
		}
		return (ret);
	}

own_page:
	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_szc == 0);

	/* Call the callback */
	ret = page_capture_take_action(pp, flags, datap);

	if (ret == 0) {
		return (0);
	}

	/*
	 * Note that in the failure cases from page_capture_take_action, the
	 * EXCL lock will have already been dropped.
	 */
	if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
		page_capture_add_hash(pp, szc, flags, datap);
	}
	return (EAGAIN);
}

int
page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;

	curthread->t_flag |= T_CAPTURING;
	ret = page_itrycapture(pp, szc, flags, datap);
	curthread->t_flag &= ~T_CAPTURING;	/* we know the flag was set */
	return (ret);
}
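/*
 * Illustrative sketch (under #if 0, not compiled): a synchronous caller of
 * page_trycapture().  Only EAGAIN is worth retrying; EPERM, ENOENT and
 * EBUSY are effectively permanent for this page.  The function name and
 * retry count are hypothetical.
 */
#if 0
static int
example_capture_retry(page_t *pp, void *datap)
{
	int err;
	int tries;

	for (tries = 0; tries < 3; tries++) {
		err = page_trycapture(pp, 0, CAPTURE_PHYSMEM, datap);
		if (err != EAGAIN)
			return (err);
		delay(hz);	/* back off before the next attempt */
	}
	return (EAGAIN);
}
#endif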
/*
 * When unlocking a page which has the PR_CAPTURE bit set, this routine
 * gets called to try and capture the page.
 */
void
page_unlock_capture(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;
	int i;
	uint_t szc;
	uint_t flags = 0;
	void *datap;
	kmutex_t *mp;
	extern vnode_t retired_pages;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again as there
	 * are locations in the code where we unlock a page while holding
	 * the mutex, which can lead to the page being captured and
	 * eventually end up here.  As we may be hashing out the old page and
	 * hashing into the retire vnode, we need to make sure we don't own
	 * them.  Other callbacks that do hash operations also need to make
	 * sure that before they hash in to a vnode they do not currently own
	 * the vphm mutex, otherwise there will be a panic.
	 */
	if (mutex_owned(page_vnode_mutex(&retired_pages))) {
		page_unlock_nocapture(pp);
		return;
	}
	if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) {
		page_unlock_nocapture(pp);
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	mp = &page_capture_hash[index].pchh_mutex;
	mutex_enter(mp);
	for (i = 0; i < 2; i++) {
		bp = page_capture_hash[index].lists[i].next;
		while (bp != &page_capture_hash[index].lists[i]) {
			if (bp->pp == pp) {
				szc = bp->szc;
				flags = bp->flags | CAPTURE_ASYNC;
				datap = bp->datap;
				mutex_exit(mp);
				(void) page_trycapture(pp, szc, flags, datap);
				return;
			}
			bp = bp->next;
		}
	}

	/* Failed to find page in hash so clear flags and unlock it. */
	page_clrtoxic(pp, PR_CAPTURE);
	page_unlock(pp);

	mutex_exit(mp);
}
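/*
 * Illustrative sketch (under #if 0, not compiled): the vphm rule from the
 * comment in page_unlock_capture() as a capture callback might apply it
 * before hashing a page into a vnode.  The callback name and the use of
 * datap as the target vnode are hypothetical.
 */
#if 0
static int
example_rehash_cb(page_t *pp, void *datap, uint_t flags)
{
	vnode_t *vp = datap;	/* hypothetical: target vnode via datap */

	if (mutex_owned(page_vnode_mutex(vp))) {
		page_free(pp, 1);
		return (-1);	/* hashing in now would self-deadlock */
	}
	/* ... safe to hash pp into vp here ... */
	return (0);
}
#endif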
void
page_capture_init()
{
	int i;
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		page_capture_hash[i].lists[0].next =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[0].prev =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[1].next =
		    &page_capture_hash[i].lists[1];
		page_capture_hash[i].lists[1].prev =
		    &page_capture_hash[i].lists[1];
	}

	pc_thread_shortwait = 23 * hz;
	pc_thread_longwait = 1201 * hz;
	pc_thread_retry = 3;
	mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
	pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0,
	    &p0, TS_RUN, minclsyspri);
}
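/*
 * Illustrative sketch (under #if 0, not compiled): the sentinel idiom set
 * up above.  A circular list whose sentinel points to itself is empty, so
 * walks and unlinks need no NULL checks.  example_list_empty() is
 * hypothetical.
 */
#if 0
static int
example_list_empty(page_capture_hash_bucket_t *sentinel)
{
	/* the list is empty when the sentinel points back to itself */
	return (sentinel->next == sentinel);
}
#endif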
/*
 * It is necessary to scrub any failing pages prior to reboot in order to
 * prevent a latent error trap from occurring on the next boot.
 */
void
page_retire_mdboot()
{
	page_t *pp;
	int i, j;
	page_capture_hash_bucket_t *bp;
	uchar_t pri;

	/* walk lists looking for pages to scrub */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0) {
				break;
			}
		}
		if (pri == PC_NUM_PRI)
			continue;

		mutex_enter(&page_capture_hash[i].pchh_mutex);

		for (j = 0; j < 2; j++) {
			bp = page_capture_hash[i].lists[j].next;
			while (bp != &page_capture_hash[i].lists[j]) {
				pp = bp->pp;
				if (PP_TOXIC(pp)) {
					if (page_trylock(pp, SE_EXCL)) {
						PP_CLRFREE(pp);
						pagescrub(pp, 0, PAGESIZE);
						page_unlock(pp);
					}
				}
				bp = bp->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}

/*
 * Walk the page_capture_hash trying to capture pages and also cleanup old
 * entries which have expired.
 */
void
page_capture_async()
{
	page_t *pp;
	int i;
	int ret;
	page_capture_hash_bucket_t *bp1, *bp2;
	uint_t szc;
	uint_t flags;
	void *datap;
	uchar_t pri;

	/* If there are outstanding pages to be captured, get to work */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0)
				break;
		}
		if (pri == PC_NUM_PRI)
			continue;

		/* Append list 1 to list 0 and then walk through list 0 */
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		bp1 = &page_capture_hash[i].lists[1];
		bp2 = bp1->next;
		if (bp1 != bp2) {
			bp1->prev->next = page_capture_hash[i].lists[0].next;
			bp2->prev = &page_capture_hash[i].lists[0];
			page_capture_hash[i].lists[0].next->prev = bp1->prev;
			page_capture_hash[i].lists[0].next = bp2;
			bp1->next = bp1;
			bp1->prev = bp1;
		}

		/* list[1] will be empty now */

		bp1 = page_capture_hash[i].lists[0].next;
		while (bp1 != &page_capture_hash[i].lists[0]) {
			/* Check expiration time */
			if ((ddi_get_lbolt() > bp1->expires &&
			    bp1->expires != -1) ||
			    page_deleted(bp1->pp)) {
				page_capture_hash[i].lists[0].next = bp1->next;
				bp1->next->prev =
				    &page_capture_hash[i].lists[0];
				page_capture_hash[i].num_pages[bp1->pri]--;

				/*
				 * We can safely remove the PR_CAPTURE bit
				 * without holding the EXCL lock on the page
				 * as the PR_CAPTURE bit requires that the
				 * page_capture_hash[].pchh_mutex be held
				 * to modify it.
				 */
				page_clrtoxic(bp1->pp, PR_CAPTURE);
				mutex_exit(&page_capture_hash[i].pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				mutex_enter(&page_capture_hash[i].pchh_mutex);
				bp1 = page_capture_hash[i].lists[0].next;
				continue;
			}
			pp = bp1->pp;
			szc = bp1->szc;
			flags = bp1->flags;
			datap = bp1->datap;
			mutex_exit(&page_capture_hash[i].pchh_mutex);
			if (page_trylock(pp, SE_EXCL)) {
				ret = page_trycapture(pp, szc,
				    flags | CAPTURE_ASYNC, datap);
			} else {
				ret = 1;	/* move to walked hash */
			}

			if (ret != 0) {
				/* Move to walked hash */
				(void) page_capture_move_to_walked(pp);
			}
			mutex_enter(&page_capture_hash[i].pchh_mutex);
			bp1 = page_capture_hash[i].lists[0].next;
		}

		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}
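/*
 * Illustrative sketch (under #if 0, not compiled): the constant-time splice
 * page_capture_async() performs above, shown on two generic sentinel lists.
 * example_splice() is hypothetical; "from" is left empty and its entries
 * are prepended to "to".
 */
#if 0
static void
example_splice(page_capture_hash_bucket_t *from,
    page_capture_hash_bucket_t *to)
{
	if (from->next != from) {		/* nothing to do if empty */
		from->prev->next = to->next;	/* from's tail -> to's head */
		from->next->prev = to;		/* from's head follows to */
		to->next->prev = from->prev;	/* to's old head after tail */
		to->next = from->next;
		from->next = from;		/* reset "from" to empty */
		from->prev = from;
	}
}
#endif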
/*
 * This function is called by the page_capture_thread, and is needed in
 * order to initiate aio cleanup, so that pages used in aio
 * will be unlocked and subsequently retired by page_capture_thread.
 */
static int
do_aio_cleanup(void)
{
	proc_t *procp;
	int (*aio_cleanup_dr_delete_memory)(proc_t *);
	int cleaned = 0;

	if (modload("sys", "kaio") == -1) {
		cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
		return (0);
	}
	/*
	 * We use the aio_cleanup_dr_delete_memory function to
	 * initiate the actual clean up; this function will wake
	 * up the per-process aio_cleanup_thread.
	 */
	aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
	    modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
	if (aio_cleanup_dr_delete_memory == NULL) {
		cmn_err(CE_WARN,
		    "aio_cleanup_dr_delete_memory not found in kaio");
		return (0);
	}
	mutex_enter(&pidlock);
	for (procp = practive; (procp != NULL); procp = procp->p_next) {
		mutex_enter(&procp->p_lock);
		if (procp->p_aio != NULL) {
			/* cleanup proc's outstanding kaio */
			cleaned += (*aio_cleanup_dr_delete_memory)(procp);
		}
		mutex_exit(&procp->p_lock);
	}
	mutex_exit(&pidlock);
	return (cleaned);
}
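/*
 * Illustrative sketch (under #if 0, not compiled): the load-then-resolve
 * pattern used by do_aio_cleanup() above, reduced to its essentials.  The
 * module and symbol names come from the function above; example_resolve()
 * is hypothetical.
 */
#if 0
static int (*example_resolve(void))(proc_t *)
{
	if (modload("sys", "kaio") == -1)
		return (NULL);	/* module missing or failed to load */
	/* modgetsymvalue returns 0 (the second arg) if symbol not found */
	return ((int (*)(proc_t *))
	    modgetsymvalue("aio_cleanup_dr_delete_memory", 0));
}
#endif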
/*
 * Helper function for page_capture_thread.
 */
static void
page_capture_handle_outstanding(void)
{
	int ntry;

	/* Reap pages before attempting to capture pages */
	kmem_reap();

	if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
	    hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
		/*
		 * Note: Purging only for platforms that support
		 * ISM hat_pageunload() - mainly SPARC.  On x86/x64
		 * platforms ISM pages are SE_SHARED locked until destroyed.
		 */

		/* disable and purge seg_pcache */
		(void) seg_p_disable();
		for (ntry = 0; ntry < pc_thread_retry; ntry++) {
			if (!page_retire_pend_count())
				break;
			if (do_aio_cleanup()) {
				/*
				 * allow the apps' cleanup threads
				 * to run
				 */
				delay(pc_thread_shortwait);
			}
			page_capture_async();
		}
		/* reenable seg_pcache */
		seg_p_enable();

		/* completed what can be done.  break out */
		return;
	}

	/*
	 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
	 * and then attempt to capture.
	 */
	seg_preap();
	page_capture_async();
}

/*
 * The page_capture_thread loops forever, looking to see if there are
 * pages still waiting to be captured.
 */
static void
page_capture_thread(void)
{
	callb_cpr_t c;
	int i;
	int high_pri_pages;
	int low_pri_pages;
	clock_t timeout;

	CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr,
	    "page_capture");

	mutex_enter(&pc_thread_mutex);
	for (;;) {
		high_pri_pages = 0;
		low_pri_pages = 0;
		for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
			high_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_HI];
			low_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_LO];
		}

		timeout = pc_thread_longwait;
		if (high_pri_pages != 0) {
			timeout = pc_thread_shortwait;
			page_capture_handle_outstanding();
		} else if (low_pri_pages != 0) {
			page_capture_async();
		}
		CALLB_CPR_SAFE_BEGIN(&c);
		(void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
		    timeout, TR_CLOCK_TICK);
		CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
	}
	/*NOTREACHED*/
}

/*
 * Attempt to locate a bucket that has enough pages to satisfy the request.
 * The initial check is done without the lock to avoid unneeded contention.
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in a bucket.
 */
static int
pcf_decrement_bucket(pgcnt_t npages)
{
	struct pcf *p;
	struct pcf *q;
	int i;

	p = &pcf[PCF_INDEX()];
	q = &pcf[pcf_fanout];
	for (i = 0; i < pcf_fanout; i++) {
		if (p->pcf_count > npages) {
			/*
			 * a good one to try.
			 */
			mutex_enter(&p->pcf_lock);
			if (p->pcf_count > npages) {
				p->pcf_count -= (uint_t)npages;
				/*
				 * freemem is not protected by any lock.
				 * Thus, we cannot have any assertion
				 * containing freemem here.
				 */
				freemem -= npages;
				mutex_exit(&p->pcf_lock);
				return (1);
			}
			mutex_exit(&p->pcf_lock);
		}
		p++;
		if (p >= q) {
			p = pcf;
		}
	}
	return (0);
}

/*
 * Arguments:
 *	pcftotal_ret:	If the value is not NULL and we have walked all the
 *			buckets but did not find enough pages then it will
 *			be set to the total number of pages in all the pcf
 *			buckets.
 *	npages:		Is the number of pages we have been requested to
 *			find.
 *	unlock:		If set to 0 we will leave the buckets locked if the
 *			requested number of pages are not found.
 *
 * Go and try to satisfy the page request from any number of buckets.
 * This can be a very expensive operation as we have to lock the buckets
 * we are checking (and keep them locked), starting at bucket 0.
 *
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in the buckets.
 */
static int
pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock)
{
	struct pcf *p;
	pgcnt_t pcftotal;
	int i;

	p = pcf;
	/* try to collect pages from several pcf bins */
	for (pcftotal = 0, i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		pcftotal += p->pcf_count;
		if (pcftotal >= npages) {
			/*
			 * Wow!  There are enough pages lying around
			 * to satisfy the request.  Do the accounting,
			 * drop the locks we acquired, and go back.
			 *
			 * freemem is not protected by any lock.  So,
			 * we cannot have any assertion containing
			 * freemem.
			 */
			freemem -= npages;
			while (p >= pcf) {
				if (p->pcf_count <= npages) {
					npages -= p->pcf_count;
					p->pcf_count = 0;
				} else {
					p->pcf_count -= (uint_t)npages;
					npages = 0;
				}
				mutex_exit(&p->pcf_lock);
				p--;
			}
			ASSERT(npages == 0);
			return (1);
		}
		p++;
	}
	if (unlock) {
		/* failed to collect pages - release the locks */
		while (--p >= pcf) {
			mutex_exit(&p->pcf_lock);
		}
	}
	if (pcftotal_ret != NULL)
		*pcftotal_ret = pcftotal;
	return (0);
}
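/*
 * Illustrative sketch (under #if 0, not compiled): how the two helpers
 * above are meant to combine - the unlocked fast path first, then the
 * expensive all-bucket walk.  example_take_pages() is hypothetical.
 */
#if 0
static int
example_take_pages(pgcnt_t npages)
{
	pgcnt_t pcftotal;

	if (pcf_decrement_bucket(npages))
		return (1);	/* one bucket covered the whole request */

	/* unlock == 1: drop all bucket locks if we come up short */
	if (pcf_decrement_multiple(&pcftotal, npages, 1))
		return (1);

	return (0);		/* only pcftotal pages were available */
}
#endif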