1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright (c) 2010, Intel Corporation.
26 * All rights reserved.
27 */
28
29 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
30 /* All Rights Reserved */
31
32 /*
33 * Portions of this source code were derived from Berkeley 4.3 BSD
34 * under license from the Regents of the University of California.
35 */
36
37 /*
38 * UNIX machine dependent virtual memory support.
39 */
40
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/user.h>
45 #include <sys/proc.h>
46 #include <sys/kmem.h>
47 #include <sys/vmem.h>
48 #include <sys/buf.h>
49 #include <sys/cpuvar.h>
50 #include <sys/lgrp.h>
51 #include <sys/disp.h>
52 #include <sys/vm.h>
53 #include <sys/mman.h>
54 #include <sys/vnode.h>
55 #include <sys/cred.h>
56 #include <sys/exec.h>
57 #include <sys/exechdr.h>
58 #include <sys/debug.h>
59 #include <sys/vmsystm.h>
60 #include <sys/swap.h>
61 #include <sys/dumphdr.h>
62
63 #include <vm/hat.h>
64 #include <vm/as.h>
65 #include <vm/seg.h>
66 #include <vm/seg_kp.h>
67 #include <vm/seg_vn.h>
68 #include <vm/page.h>
69 #include <vm/seg_kmem.h>
70 #include <vm/seg_kpm.h>
71 #include <vm/vm_dep.h>
72
73 #include <sys/cpu.h>
74 #include <sys/vm_machparam.h>
75 #include <sys/memlist.h>
76 #include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
77 #include <vm/hat_i86.h>
78 #include <sys/x86_archext.h>
79 #include <sys/elf_386.h>
80 #include <sys/cmn_err.h>
81 #include <sys/archsystm.h>
82 #include <sys/machsystm.h>
83
84 #include <sys/vtrace.h>
85 #include <sys/ddidmareq.h>
86 #include <sys/promif.h>
87 #include <sys/memnode.h>
88 #include <sys/stack.h>
89 #include <util/qsort.h>
90 #include <sys/taskq.h>
91
92 #ifdef __xpv
93
94 #include <sys/hypervisor.h>
95 #include <sys/xen_mmu.h>
96 #include <sys/balloon_impl.h>
97
98 /*
99 * Domain 0 pages usable for DMA are pre-allocated and kept in
100 * distinct lists, ordered by increasing mfn.
101 */
102 static kmutex_t io_pool_lock;
103 static kmutex_t contig_list_lock;
104 static page_t *io_pool_4g; /* pool for 32 bit dma limited devices */
105 static page_t *io_pool_16m; /* pool for 24 bit dma limited legacy devices */
106 static long io_pool_cnt;
107 static long io_pool_cnt_max = 0;
108 #define DEFAULT_IO_POOL_MIN 128
109 static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
110 static long io_pool_cnt_lowater = 0;
111 static long io_pool_shrink_attempts; /* how many times did we try to shrink */
112 static long io_pool_shrinks; /* how many times did we really shrink */
113 static long io_pool_grows; /* how many times did we grow */
114 static mfn_t start_mfn = 1;
115 static caddr_t io_pool_kva; /* used to alloc pages when needed */
116
117 static int create_contig_pfnlist(uint_t);
118
119 /*
120 * percentage of phys mem to hold in the i/o pool
121 */
122 #define DEFAULT_IO_POOL_PCT 2
123 static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
124 static void page_io_pool_sub(page_t **, page_t *, page_t *);
125 int ioalloc_dbg = 0;
126
127 #endif /* __xpv */
128
129 uint_t vac_colors = 1;
130
131 int largepagesupport = 0;
132 extern uint_t page_create_new;
133 extern uint_t page_create_exists;
134 extern uint_t page_create_putbacks;
135 /*
136 * Allow users to disable the kernel's use of SSE.
137 */
138 extern int use_sse_pagecopy, use_sse_pagezero;
139
140 /*
141 * Combined memory ranges from mem_node_config and memranges[], used to
142 * manage a single mnode/mtype dimension in the page lists.
143 */
144 typedef struct {
145 pfn_t mnr_pfnlo;
146 pfn_t mnr_pfnhi;
147 int mnr_mnode;
148 int mnr_memrange; /* index into memranges[] */
149 int mnr_next; /* next lower PA mnoderange */
150 int mnr_exists;
151 /* maintain page list stats */
152 pgcnt_t mnr_mt_clpgcnt; /* cache list cnt */
153 pgcnt_t mnr_mt_flpgcnt[MMU_PAGE_SIZES]; /* free list cnt per szc */
154 pgcnt_t mnr_mt_totcnt; /* sum of cache and free lists */
155 #ifdef DEBUG
156 struct mnr_mts { /* mnode/mtype szc stats */
157 pgcnt_t mnr_mts_pgcnt;
158 int mnr_mts_colors;
159 pgcnt_t *mnr_mtsc_pgcnt;
160 } *mnr_mts;
161 #endif
162 } mnoderange_t;
163
164 #define MEMRANGEHI(mtype) \
165 ((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
166 #define MEMRANGELO(mtype) (memranges[mtype])
167
168 #define MTYPE_FREEMEM(mt) (mnoderanges[mt].mnr_mt_totcnt)
169
170 /*
171 * As the PC architecture evolved, memory was clumped into several
172 * ranges so that various historical I/O devices could do DMA:
173 * < 16Meg - ISA bus
174 * < 2Gig - ???
175 * < 4Gig - PCI bus or drivers that don't understand PAE mode
176 *
177 * These are listed in reverse order, so that we can skip over unused
178 * ranges on machines with small memories.
179 *
180 * For now under the Hypervisor, we'll only ever have one memrange.
181 */
182 #define PFN_4GIG 0x100000
183 #define PFN_16MEG 0x1000
184 /* Indices into the memory range (arch_memranges) array. */
185 #define MRI_4G 0
186 #define MRI_2G 1
187 #define MRI_16M 2
188 #define MRI_0 3
189 static pfn_t arch_memranges[NUM_MEM_RANGES] = {
190 PFN_4GIG, /* pfn range for 4G and above */
191 0x80000, /* pfn range for 2G-4G */
192 PFN_16MEG, /* pfn range for 16M-2G */
193 0x00000, /* pfn range for 0-16M */
194 };
195 pfn_t *memranges = &arch_memranges[0];
196 int nranges = NUM_MEM_RANGES;
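
/*
 * Illustrative example (values assumed, not from the original source):
 * with the full four-entry table above, a pfn of 0x1800 (24M) is below
 * memranges[MRI_4G] and memranges[MRI_2G] but at least
 * memranges[MRI_16M] == PFN_16MEG, so it belongs to the 16M-2G range;
 * memrange_num() below implements exactly this scan.
 */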
197
198 /*
199 * This combines mem_node_config and memranges into one data
200 * structure to be used for page list management.
201 */
202 mnoderange_t *mnoderanges;
203 int mnoderangecnt;
204 int mtype4g;
205 int mtype16m;
206 int mtypetop; /* index of highest pfn'ed mnoderange */
207
208 /*
209 * 4g memory management variables for systems with more than 4g of memory:
210 *
211 * physical memory below 4g is required for 32bit dma devices and, currently,
212 * for kmem memory. On systems with more than 4g of memory, the pool of memory
213 * below 4g can be depleted without any paging activity given that there is
214 * likely to be sufficient memory above 4g.
215 *
216 * physmax4g is set true if the largest pfn is over 4g. The rest of the
217 * 4g memory management code is enabled only when physmax4g is true.
218 *
219 * maxmem4g is the count of the maximum number of pages on the page lists
220 * with physical addresses below 4g. It can be a lot less than 4g given that
221 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
222 * agp aperture etc.
223 *
224 * freemem4g maintains the count of the number of available pages on the
225 * page lists with physical addresses below 4g.
226 *
227 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
228 * 6% (desfree4gshift = 4) of maxmem4g.
229 *
230 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
231 * and the amount of physical memory above 4g is greater than freemem4g.
232 * In this case, page_get_* routines will restrict below 4g allocations
233 * for requests that don't specifically require it.
234 */
235
236 #define DESFREE4G (maxmem4g >> desfree4gshift)
237
238 #define RESTRICT4G_ALLOC \
239 (physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
240
241 static pgcnt_t maxmem4g;
242 static pgcnt_t freemem4g;
243 static int physmax4g;
244 static int desfree4gshift = 4; /* maxmem4g shift to derive DESFREE4G */
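
/*
 * Worked example with assumed numbers: if maxmem4g is 0xC0000 pages
 * (3G of usable memory below 4g), the default desfree4gshift of 4
 * gives DESFREE4G = 0xC000 pages (192M, i.e. 1/16 of maxmem4g).
 * RESTRICT4G_ALLOC then kicks in once freemem4g drops below that and
 * less than half of the system's freemem lies below 4g.
 */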
245
246 /*
247 * 16m memory management:
248 *
249 * reserve some amount of physical memory below 16m for legacy devices.
250 *
251 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
252 * 16m or if the 16m pool drops below DESFREE16M.
253 *
254 * In this case, general page allocations via page_get_{free,cache}list
255 * routines will be restricted from allocating from the 16m pool. Allocations
256 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
257 * are not restricted.
258 */
259
260 #define FREEMEM16M MTYPE_FREEMEM(mtype16m)
261 #define DESFREE16M desfree16m
262 #define RESTRICT16M_ALLOC(freemem, pgcnt, flags) \
263 ((freemem != 0) && ((flags & PG_PANIC) == 0) && \
264 ((freemem >= (FREEMEM16M)) || \
265 (FREEMEM16M < (DESFREE16M + pgcnt))))
266
267 static pgcnt_t desfree16m = 0x380;
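
/*
 * With 4K pages the default desfree16m of 0x380 works out to 896
 * pages, i.e. 3.5M of the 16M pool is kept in reserve.
 */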
268
269 /*
270 * This can be patched via /etc/system to allow old non-PAE aware device
271 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
272 */
273 int restricted_kmemalloc = 0;
274
275 #ifdef VM_STATS
276 struct {
277 ulong_t pga_alloc;
278 ulong_t pga_notfullrange;
279 ulong_t pga_nulldmaattr;
280 ulong_t pga_allocok;
281 ulong_t pga_allocfailed;
282 ulong_t pgma_alloc;
283 ulong_t pgma_allocok;
284 ulong_t pgma_allocfailed;
285 ulong_t pgma_allocempty;
286 } pga_vmstats;
287 #endif
288
289 uint_t mmu_page_sizes;
290
291 /* How many page sizes the users can see */
292 uint_t mmu_exported_page_sizes;
293
294 /* page sizes that legacy applications can see */
295 uint_t mmu_legacy_page_sizes;
296
297 /*
298 * Number of pages in 1 GB. Don't enable automatic large pages if we have
299 * fewer than this many pages.
300 */
301 pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
302 pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
303
304 /*
305 * Maximum and default segment size tunables for user private
306 * and shared anon memory, and user text and initialized data.
307 * These can be patched via /etc/system to allow large pages
308 * to be used for mapping application private and shared anon memory.
309 */
310 size_t mcntl0_lpsize = MMU_PAGESIZE;
311 size_t max_uheap_lpsize = MMU_PAGESIZE;
312 size_t default_uheap_lpsize = MMU_PAGESIZE;
313 size_t max_ustack_lpsize = MMU_PAGESIZE;
314 size_t default_ustack_lpsize = MMU_PAGESIZE;
315 size_t max_privmap_lpsize = MMU_PAGESIZE;
316 size_t max_uidata_lpsize = MMU_PAGESIZE;
317 size_t max_utext_lpsize = MMU_PAGESIZE;
318 size_t max_shm_lpsize = MMU_PAGESIZE;
319
320
321 /*
322 * initialized by page_coloring_init().
323 */
324 uint_t page_colors;
325 uint_t page_colors_mask;
326 uint_t page_coloring_shift;
327 int cpu_page_colors;
328 static uint_t l2_colors;
329
330 /*
331 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
332 * and page_colors are calculated from the l2 cache n-way set size. Within a
333 * mnode range, the page freelist and cachelist are hashed into bins based on
334 * color. This makes it easier to search for a page within a specific memory
335 * range.
336 */
337 #define PAGE_COLORS_MIN 16
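
/*
 * Example with an assumed cache geometry: a 512K, 8-way L2 with 4K
 * pages yields l2_colors = 512K / (8 * 4K) = 16 in page_coloring_init()
 * below, so pages are hashed into 16 color bins per mnode range.
 */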
338
339 page_t ****page_freelists;
340 page_t ***page_cachelists;
341
342
343 /*
344 * Used by page layer to know about page sizes
345 */
346 hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];
347
348 kmutex_t *fpc_mutex[NPC_MUTEX];
349 kmutex_t *cpc_mutex[NPC_MUTEX];
350
351 /* Lock to protect mnoderanges array for memory DR operations. */
352 static kmutex_t mnoderange_lock;
353
354 /*
355 * Only let one thread at a time try to coalesce large pages, to
356 * prevent them from working against each other.
357 */
358 static kmutex_t contig_lock;
359 #define CONTIG_LOCK() mutex_enter(&contig_lock);
360 #define CONTIG_UNLOCK() mutex_exit(&contig_lock);
361
362 #define PFN_16M (mmu_btop((uint64_t)0x1000000))
363
364 /*
365 * Return the optimum page size for a given mapping
366 */
367 /*ARGSUSED*/
368 size_t
369 map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
370 {
371 level_t l = 0;
372 size_t pgsz = MMU_PAGESIZE;
373 size_t max_lpsize;
374 uint_t mszc;
375
376 ASSERT(maptype != MAPPGSZ_VA);
377
378 if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
379 return (MMU_PAGESIZE);
380 }
381
382 switch (maptype) {
383 case MAPPGSZ_HEAP:
384 case MAPPGSZ_STK:
385 max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
386 MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
387 if (max_lpsize == MMU_PAGESIZE) {
388 return (MMU_PAGESIZE);
389 }
390 if (len == 0) {
391 len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
392 p->p_brksize - p->p_bssbase : p->p_stksize;
393 }
394 len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
395 default_uheap_lpsize) : MAX(len, default_ustack_lpsize);
396
397 /*
398 * use the page size that best fits len
399 */
400 for (l = mmu.umax_page_level; l > 0; --l) {
401 if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
402 continue;
403 } else {
404 pgsz = LEVEL_SIZE(l);
405 }
406 break;
407 }
408
409 mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
410 p->p_stkpageszc);
411 if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
412 pgsz = hw_page_array[mszc].hp_size;
413 }
414 return (pgsz);
415
416 case MAPPGSZ_ISM:
417 for (l = mmu.umax_page_level; l > 0; --l) {
418 if (len >= LEVEL_SIZE(l))
419 return (LEVEL_SIZE(l));
420 }
421 return (LEVEL_SIZE(0));
422 }
423 return (pgsz);
424 }
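
/*
 * Worked example for map_pgsz() with assumed values: a MAPPGSZ_HEAP
 * request of len = 3M with max_uheap_lpsize patched to 2M walks the
 * levels downward and selects LEVEL_SIZE(1) == 2M, since 2M fits
 * within both max_lpsize and len; with len below 2M the loop finds no
 * level and pgsz stays at MMU_PAGESIZE (absent a larger inherited
 * segment size code).
 */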
425
426 static uint_t
427 map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
428 size_t min_physmem)
429 {
430 caddr_t eaddr = addr + size;
431 uint_t szcvec = 0;
432 caddr_t raddr;
433 caddr_t readdr;
434 size_t pgsz;
435 int i;
436
437 if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
438 return (0);
439 }
440
441 for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
442 pgsz = page_get_pagesize(i);
443 if (pgsz > max_lpsize) {
444 continue;
445 }
446 raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
447 readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
448 if (raddr < addr || raddr >= readdr) {
449 continue;
450 }
451 if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
452 continue;
453 }
454 /*
455 * Set szcvec to the remaining page sizes.
456 */
457 szcvec = ((1 << (i + 1)) - 1) & ~1;
458 break;
459 }
460 return (szcvec);
461 }
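
/*
 * Example of the szcvec encoding: if the first size code that fits is
 * i == 2, then szcvec = ((1 << 3) - 1) & ~1 = 0x6, a bit vector
 * selecting size codes 1 and 2 while always excluding the base page
 * size at bit 0.
 */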
462
463 /*
464 * Return a bit vector of large page size codes that
465 * can be used to map [addr, addr + len) region.
466 */
467 /*ARGSUSED*/
468 uint_t
469 map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
470 int memcntl)
471 {
472 size_t max_lpsize = mcntl0_lpsize;
473
474 if (mmu.max_page_level == 0)
475 return (0);
476
477 if (flags & MAP_TEXT) {
478 if (!memcntl)
479 max_lpsize = max_utext_lpsize;
480 return (map_szcvec(addr, size, off, max_lpsize,
481 shm_lpg_min_physmem));
482
483 } else if (flags & MAP_INITDATA) {
484 if (!memcntl)
485 max_lpsize = max_uidata_lpsize;
486 return (map_szcvec(addr, size, off, max_lpsize,
487 privm_lpg_min_physmem));
488
489 } else if (type == MAPPGSZC_SHM) {
490 if (!memcntl)
491 max_lpsize = max_shm_lpsize;
492 return (map_szcvec(addr, size, off, max_lpsize,
493 shm_lpg_min_physmem));
494
495 } else if (type == MAPPGSZC_HEAP) {
496 if (!memcntl)
497 max_lpsize = max_uheap_lpsize;
498 return (map_szcvec(addr, size, off, max_lpsize,
499 privm_lpg_min_physmem));
500
501 } else if (type == MAPPGSZC_STACK) {
502 if (!memcntl)
503 max_lpsize = max_ustack_lpsize;
504 return (map_szcvec(addr, size, off, max_lpsize,
505 privm_lpg_min_physmem));
506
507 } else {
508 if (!memcntl)
509 max_lpsize = max_privmap_lpsize;
510 return (map_szcvec(addr, size, off, max_lpsize,
511 privm_lpg_min_physmem));
512 }
513 }
514
515 /*
516 * Handle a pagefault.
517 */
518 faultcode_t
519 pagefault(
520 caddr_t addr,
521 enum fault_type type,
522 enum seg_rw rw,
523 int iskernel)
524 {
525 struct as *as;
526 struct hat *hat;
527 struct proc *p;
528 kthread_t *t;
529 faultcode_t res;
530 caddr_t base;
531 size_t len;
532 int err;
533 int mapped_red;
534 uintptr_t ea;
535
536 ASSERT_STACK_ALIGNED();
537
538 if (INVALID_VADDR(addr))
539 return (FC_NOMAP);
540
541 mapped_red = segkp_map_red();
542
543 if (iskernel) {
544 as = &kas;
545 hat = as->a_hat;
546 } else {
547 t = curthread;
548 p = ttoproc(t);
549 as = p->p_as;
550 hat = as->a_hat;
551 }
552
553 /*
554 * Dispatch pagefault.
555 */
556 res = as_fault(hat, as, addr, 1, type, rw);
557
558 /*
559 * If this isn't a potential unmapped hole in the user's
560 * UNIX data or stack segments, just return status info.
561 */
562 if (res != FC_NOMAP || iskernel)
563 goto out;
564
565 /*
566 * Check to see if we happened to fault on a currently unmapped
567 * part of the UNIX data or stack segments. If so, create a zfod
568 * mapping there and then try calling the fault routine again.
569 */
570 base = p->p_brkbase;
571 len = p->p_brksize;
572
573 if (addr < base || addr >= base + len) { /* data seg? */
574 base = (caddr_t)p->p_usrstack - p->p_stksize;
575 len = p->p_stksize;
576 if (addr < base || addr >= p->p_usrstack) { /* stack seg? */
577 /* not in either UNIX data or stack segments */
578 res = FC_NOMAP;
579 goto out;
580 }
581 }
582
583 /*
584 * The rest of this function implements 3.X/4.X/5.X compatibility.
585 * This code is probably not needed anymore.
586 */
587 if (p->p_model == DATAMODEL_ILP32) {
588
589 /* expand the gap to the page boundaries on each side */
590 ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
591 base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
592 len = ea - (uintptr_t)base;
593
594 as_rangelock(as);
595 if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
596 0) {
597 err = as_map(as, base, len, segvn_create, zfod_argsp);
598 as_rangeunlock(as);
599 if (err) {
600 res = FC_MAKE_ERR(err);
601 goto out;
602 }
603 } else {
604 /*
605 * This page is already mapped by another thread after
606 * we returned from as_fault() above. We just fall
607 * through to the as_fault() call below.
608 */
609 as_rangeunlock(as);
610 }
611
612 res = as_fault(hat, as, addr, 1, F_INVAL, rw);
613 }
614
615 out:
616 if (mapped_red)
617 segkp_unmap_red();
618
619 return (res);
620 }
621
622 void
623 map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
624 {
625 struct proc *p = curproc;
626 caddr_t userlimit = (flags & _MAP_LOW32) ?
627 (caddr_t)_userlimit32 : p->p_as->a_userlimit;
628
629 map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
630 }
631
632 /*ARGSUSED*/
633 int
634 map_addr_vacalign_check(caddr_t addr, u_offset_t off)
635 {
636 return (0);
637 }
638
639 /*
640 * map_addr_proc() is the routine called when the system is to
641 * choose an address for the user. We will pick an address
642 * range which is the highest available below userlimit.
643 *
644 * Every mapping will have a redzone of a single page on either side of
645 * the request. This is done to leave one page unmapped between segments.
646 * This is not required, but it's useful for the user because if their
647 * program strays across a segment boundary, it will catch a fault
648 * immediately making debugging a little easier. Currently the redzone
649 * is mandatory.
650 *
651 * addrp is a value/result parameter.
652 * On input it is a hint from the user to be used in a completely
653 * machine dependent fashion. We decide to completely ignore this hint.
654 * If MAP_ALIGN was specified, addrp contains the minimal alignment, which
655 * must be some "power of two" multiple of pagesize.
656 *
657 * On output it is NULL if no address can be found in the current
658 * process's address space, or else an address that is currently
659 * not mapped for len bytes with a page of red zone on either side.
660 *
661 * vacalign is not needed on x86 (it's for virtually addressed caches)
662 */
663 /*ARGSUSED*/
664 void
665 map_addr_proc(
666 caddr_t *addrp,
667 size_t len,
668 offset_t off,
669 int vacalign,
670 caddr_t userlimit,
671 struct proc *p,
672 uint_t flags)
673 {
674 struct as *as = p->p_as;
675 caddr_t addr;
676 caddr_t base;
677 size_t slen;
678 size_t align_amount;
679
680 ASSERT32(userlimit == as->a_userlimit);
681
682 base = p->p_brkbase;
683 #if defined(__amd64)
684 /*
685 * XX64 Yes, this needs more work.
686 */
687 if (p->p_model == DATAMODEL_NATIVE) {
688 if (userlimit < as->a_userlimit) {
689 /*
690 * This happens when a program wants to map
691 * something in a range that's accessible to a
692 * program in a smaller address space. For example,
693 * a 64-bit program calling mmap32(2) to guarantee
694 * that the returned address is below 4Gbytes.
695 */
696 ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));
697
698 if (userlimit > base)
699 slen = userlimit - base;
700 else {
701 *addrp = NULL;
702 return;
703 }
704 } else {
705 /*
706 * XX64 This layout is probably wrong .. but in
707 * the event we make the amd64 address space look
708 * like sparcv9 i.e. with the stack -above- the
709 * heap, this bit of code might even be correct.
710 */
711 slen = p->p_usrstack - base -
712 ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
713 }
714 } else
715 #endif
716 slen = userlimit - base;
717
718 /* Make len be a multiple of PAGESIZE */
719 len = (len + PAGEOFFSET) & PAGEMASK;
720
721 /*
722 * figure out what the alignment should be
723 *
724 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
725 */
726 if (len <= ELF_386_MAXPGSZ) {
727 /*
728 * Align virtual addresses to ensure that ELF shared libraries
729 * are mapped with the appropriate alignment constraints by
730 * the run-time linker.
731 */
732 align_amount = ELF_386_MAXPGSZ;
733 } else {
734 /*
735 * For 32-bit processes, only those which have specified
736 * MAP_ALIGN and an addr will be aligned on a larger page size.
737 * Not doing so can potentially waste up to 1G of process
738 * address space.
739 */
740 int lvl = (p->p_model == DATAMODEL_ILP32) ? 1 :
741 mmu.umax_page_level;
742
743 while (lvl && len < LEVEL_SIZE(lvl))
744 --lvl;
745
746 align_amount = LEVEL_SIZE(lvl);
747 }
748 if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
749 align_amount = (uintptr_t)*addrp;
750
751 ASSERT(ISP2(align_amount));
752 ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
753
754 off = off & (align_amount - 1);
755 /*
756 * Look for a large enough hole starting below userlimit.
757 * After finding it, use the upper part.
758 */
759 if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
760 PAGESIZE, off) == 0) {
761 caddr_t as_addr;
762
763 /*
764 * addr is the highest possible address to use since we have
765 * a PAGESIZE redzone at the beginning and end.
766 */
767 addr = base + slen - (PAGESIZE + len);
768 as_addr = addr;
769 /*
770 * Round address DOWN to the alignment amount and
771 * add the offset in.
772 * If addr is greater than as_addr, len would not be large
773 * enough to include the redzone, so we must adjust down
774 * by the alignment amount.
775 */
776 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
777 addr += (uintptr_t)off;
778 if (addr > as_addr) {
779 addr -= align_amount;
780 }
781
782 ASSERT(addr > base);
783 ASSERT(addr + len < base + slen);
784 ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
785 ((uintptr_t)(off)));
786 *addrp = addr;
787 } else {
788 *addrp = NULL; /* no more virtual space */
789 }
790 }
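
/*
 * Worked example with an assumed gap: for len = 8K, align_amount = 64K
 * and off = 0, a hole ending at base + slen = 0x50000 yields a
 * candidate addr of 0x50000 - (4K + 8K) = 0x4d000, which rounds down
 * to 0x40000; the PAGESIZE redzone on each side keeps the mapping
 * clear of its neighbors.
 */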
791
792 int valid_va_range_aligned_wraparound;
793
794 /*
795 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
796 * addresses at least "minlen" long, where the base of the range is at "off"
797 * phase from an "align" boundary and there is space for a "redzone"-sized
798 * redzone on either side of the range. On success, 1 is returned and *basep
799 * and *lenp are adjusted to describe the acceptable range (including
800 * the redzone). On failure, 0 is returned.
801 */
802 /*ARGSUSED3*/
803 int
804 valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
805 size_t align, size_t redzone, size_t off)
806 {
807 uintptr_t hi, lo;
808 size_t tot_len;
809
810 ASSERT(align == 0 ? off == 0 : off < align);
811 ASSERT(ISP2(align));
812 ASSERT(align == 0 || align >= PAGESIZE);
813
814 lo = (uintptr_t)*basep;
815 hi = lo + *lenp;
816 tot_len = minlen + 2 * redzone; /* need at least this much space */
817
818 /*
819 * If hi rolled over the top, try cutting back.
820 */
821 if (hi < lo) {
822 *lenp = 0UL - lo - 1UL;
823 /* See if this really happens. If so, then we figure out why */
824 valid_va_range_aligned_wraparound++;
825 hi = lo + *lenp;
826 }
827 if (*lenp < tot_len) {
828 return (0);
829 }
830
831 #if defined(__amd64)
832 /*
833 * Deal with a possible hole in the address range between
834 * hole_start and hole_end that should never be mapped.
835 */
836 if (lo < hole_start) {
837 if (hi > hole_start) {
838 if (hi < hole_end) {
839 hi = hole_start;
840 } else {
841 /* lo < hole_start && hi >= hole_end */
842 if (dir == AH_LO) {
843 /*
844 * prefer lowest range
845 */
846 if (hole_start - lo >= tot_len)
847 hi = hole_start;
848 else if (hi - hole_end >= tot_len)
849 lo = hole_end;
850 else
851 return (0);
852 } else {
853 /*
854 * prefer highest range
855 */
856 if (hi - hole_end >= tot_len)
857 lo = hole_end;
858 else if (hole_start - lo >= tot_len)
859 hi = hole_start;
860 else
861 return (0);
862 }
863 }
864 }
865 } else {
866 /* lo >= hole_start */
867 if (hi < hole_end)
868 return (0);
869 if (lo < hole_end)
870 lo = hole_end;
871 }
872 #endif
873
874 if (hi - lo < tot_len)
875 return (0);
876
877 if (align > 1) {
878 uintptr_t tlo = lo + redzone;
879 uintptr_t thi = hi - redzone;
880 tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
881 if (tlo < lo + redzone) {
882 return (0);
883 }
884 if (thi < tlo || thi - tlo < minlen) {
885 return (0);
886 }
887 }
888
889 *basep = (caddr_t)lo;
890 *lenp = hi - lo;
891 return (1);
892 }
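
/*
 * Worked example with assumed values: minlen = 64K, redzone = 4K,
 * align = 64K and off = 0 over [0x10000000, 0x10030000) gives
 * tlo = 0x10001000, which P2PHASEUP rounds up to 0x10010000, leaving
 * thi - tlo = 0x1f000 >= minlen, so the range is accepted.
 */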
893
894 /*
895 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
896 * addresses at least "minlen" long. On success, 1 is returned and *basep
897 * and *lenp are adjusted to describe the acceptable range. On failure, 0
898 * is returned.
899 */
900 int
901 valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
902 {
903 return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
904 }
905
906 /*
907 * Determine whether [addr, addr + len) is a valid user address range.
908 */
909 /*ARGSUSED*/
910 int
911 valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
912 caddr_t userlimit)
913 {
914 caddr_t eaddr = addr + len;
915
916 if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
917 return (RANGE_BADADDR);
918
919 #if defined(__amd64)
920 /*
921 * Check for the VA hole
922 */
923 if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
924 return (RANGE_BADADDR);
925 #endif
926
927 return (RANGE_OKAY);
928 }
929
930 /*
931 * Return 1 if the page frame is onboard memory, else 0.
932 */
933 int
934 pf_is_memory(pfn_t pf)
935 {
936 if (pfn_is_foreign(pf))
937 return (0);
938 return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
939 }
940
941 /*
942 * return the memrange containing pfn
943 */
944 int
945 memrange_num(pfn_t pfn)
946 {
947 int n;
948
949 for (n = 0; n < nranges - 1; ++n) {
950 if (pfn >= memranges[n])
951 break;
952 }
953 return (n);
954 }
955
956 /*
957 * return the mnoderange containing pfn
958 */
959 /*ARGSUSED*/
960 int
961 pfn_2_mtype(pfn_t pfn)
962 {
963 #if defined(__xpv)
964 return (0);
965 #else
966 int n;
967
968 /* Always start from highest pfn and work our way down */
969 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
970 if (pfn >= mnoderanges[n].mnr_pfnlo) {
971 break;
972 }
973 }
974 return (n);
975 #endif
976 }
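
/*
 * Example: with mnoderanges covering 0-16M, 16M-2G, 2G-4G and 4G+,
 * a pfn of 0x90000 (2.25G) is matched by the 2G-4G entry, the first
 * range walked down from mtypetop whose mnr_pfnlo it reaches.
 */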
977
978 #if !defined(__xpv)
979 /*
980 * is_contigpage_free:
981 * returns a page list of contiguous pages. It minimally has to return
982 * minctg pages. Caller determines minctg based on the scatter-gather
983 * list length.
984 *
985 * pfnp is set to the next page frame to search on return.
986 */
987 static page_t *
988 is_contigpage_free(
989 pfn_t *pfnp,
990 pgcnt_t *pgcnt,
991 pgcnt_t minctg,
992 uint64_t pfnseg,
993 int iolock)
994 {
995 int i = 0;
996 pfn_t pfn = *pfnp;
997 page_t *pp;
998 page_t *plist = NULL;
999
1000 /*
1001 * fail if pfn + minctg crosses a segment boundary.
1002 * Adjust for next starting pfn to begin at segment boundary.
1003 */
1004
1005 if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
1006 *pfnp = roundup(*pfnp, pfnseg + 1);
1007 return (NULL);
1008 }
1009
1010 do {
1011 retry:
1012 pp = page_numtopp_nolock(pfn + i);
1013 if ((pp == NULL) || IS_DUMP_PAGE(pp) ||
1014 (page_trylock(pp, SE_EXCL) == 0)) {
1015 (*pfnp)++;
1016 break;
1017 }
1018 if (page_pptonum(pp) != pfn + i) {
1019 page_unlock(pp);
1020 goto retry;
1021 }
1022
1023 if (!(PP_ISFREE(pp))) {
1024 page_unlock(pp);
1025 (*pfnp)++;
1026 break;
1027 }
1028
1029 if (!PP_ISAGED(pp)) {
1030 page_list_sub(pp, PG_CACHE_LIST);
1031 page_hashout(pp, (kmutex_t *)NULL);
1032 } else {
1033 page_list_sub(pp, PG_FREE_LIST);
1034 }
1035
1036 if (iolock)
1037 page_io_lock(pp);
1038 page_list_concat(&plist, &pp);
1039
1040 /*
1041 * exit loop when pgcnt satisfied or segment boundary reached.
1042 */
1043
1044 } while ((++i < *pgcnt) && ((pfn + i) & pfnseg));
1045
1046 *pfnp += i; /* set to next pfn to search */
1047
1048 if (i >= minctg) {
1049 *pgcnt -= i;
1050 return (plist);
1051 }
1052
1053 /*
1054 * failure: minctg not satisfied.
1055 *
1056 * if next request crosses segment boundary, set next pfn
1057 * to search from the segment boundary.
1058 */
1059 if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
1060 *pfnp = roundup(*pfnp, pfnseg + 1);
1061
1062 /* clean up any pages already allocated */
1063
1064 while (plist) {
1065 pp = plist;
1066 page_sub(&plist, pp);
1067 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
1068 if (iolock)
1069 page_io_unlock(pp);
1070 page_unlock(pp);
1071 }
1072
1073 return (NULL);
1074 }
1075 #endif /* !__xpv */
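
/*
 * Segment-boundary example for is_contigpage_free() with assumed DMA
 * attributes: dma_attr_seg = 0xffffff makes pfnseg 0xfff, so a request
 * for minctg = 8 starting at pfn 0xffa would cross the 4096-page
 * segment; the search pfn is rounded up to 0x1000 and NULL returned.
 */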
1076
1077 /*
1078 * verify that pages being returned from the allocator have the correct DMA attributes
1079 */
1080 #ifndef DEBUG
1081 #define check_dma(a, b, c) (void)(0)
1082 #else
1083 static void
1084 check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
1085 {
1086 if (dma_attr == NULL)
1087 return;
1088
1089 while (cnt-- > 0) {
1090 if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
1091 dma_attr->dma_attr_addr_lo)
1092 panic("PFN (pp=%p) below dma_attr_addr_lo", (void *)pp);
1093 if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
1094 dma_attr->dma_attr_addr_hi)
1095 panic("PFN (pp=%p) above dma_attr_addr_hi", (void *)pp);
1096 pp = pp->p_next;
1097 }
1098 }
1099 #endif
1100
1101 #if !defined(__xpv)
1102 static page_t *
1103 page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
1104 {
1105 pfn_t pfn;
1106 int sgllen;
1107 uint64_t pfnseg;
1108 pgcnt_t minctg;
1109 page_t *pplist = NULL, *plist;
1110 uint64_t lo, hi;
1111 pgcnt_t pfnalign = 0;
1112 static pfn_t startpfn;
1113 static pgcnt_t lastctgcnt;
1114 uintptr_t align;
1115
1116 CONTIG_LOCK();
1117
1118 if (mattr) {
1119 lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
1120 hi = mmu_btop(mattr->dma_attr_addr_hi);
1121 if (hi >= physmax)
1122 hi = physmax - 1;
1123 sgllen = mattr->dma_attr_sgllen;
1124 pfnseg = mmu_btop(mattr->dma_attr_seg);
1125
1126 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
1127 if (align > MMU_PAGESIZE)
1128 pfnalign = mmu_btop(align);
1129
1130 /*
1131 * in order to satisfy the request, we must minimally
1132 * acquire minctg contiguous pages
1133 */
1134 minctg = howmany(*pgcnt, sgllen);
1135
1136 ASSERT(hi >= lo);
1137
1138 /*
1139 * start from where we last searched if minctg >= lastctgcnt
1140 */
1141 if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
1142 startpfn = lo;
1143 } else {
1144 hi = physmax - 1;
1145 lo = 0;
1146 sgllen = 1;
1147 pfnseg = mmu.highest_pfn;
1148 minctg = *pgcnt;
1149
1150 if (minctg < lastctgcnt)
1151 startpfn = lo;
1152 }
1153 lastctgcnt = minctg;
1154
1155 ASSERT(pfnseg + 1 >= (uint64_t)minctg);
1156
1157 /* conserve 16m memory - start search above 16m when possible */
1158 if (hi > PFN_16M && startpfn < PFN_16M)
1159 startpfn = PFN_16M;
1160
1161 pfn = startpfn;
1162 if (pfnalign)
1163 pfn = P2ROUNDUP(pfn, pfnalign);
1164
1165 while (pfn + minctg - 1 <= hi) {
1166
1167 plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1168 if (plist) {
1169 page_list_concat(&pplist, &plist);
1170 sgllen--;
1171 /*
1172 * return when contig pages no longer needed
1173 */
1174 if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1175 startpfn = pfn;
1176 CONTIG_UNLOCK();
1177 check_dma(mattr, pplist, *pgcnt);
1178 return (pplist);
1179 }
1180 minctg = howmany(*pgcnt, sgllen);
1181 }
1182 if (pfnalign)
1183 pfn = P2ROUNDUP(pfn, pfnalign);
1184 }
1185
1186 /* cannot find contig pages in specified range */
1187 if (startpfn == lo) {
1188 CONTIG_UNLOCK();
1189 return (NULL);
1190 }
1191
1192 /* did not start with lo previously */
1193 pfn = lo;
1194 if (pfnalign)
1195 pfn = P2ROUNDUP(pfn, pfnalign);
1196
1197 /* allow search to go above startpfn */
1198 while (pfn < startpfn) {
1199
1200 plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1201 if (plist != NULL) {
1202
1203 page_list_concat(&pplist, &plist);
1204 sgllen--;
1205
1206 /*
1207 * return when contig pages no longer needed
1208 */
1209 if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1210 startpfn = pfn;
1211 CONTIG_UNLOCK();
1212 check_dma(mattr, pplist, *pgcnt);
1213 return (pplist);
1214 }
1215 minctg = howmany(*pgcnt, sgllen);
1216 }
1217 if (pfnalign)
1218 pfn = P2ROUNDUP(pfn, pfnalign);
1219 }
1220 CONTIG_UNLOCK();
1221 return (NULL);
1222 }
1223 #endif /* !__xpv */
1224
1225 /*
1226 * mnode_range_cnt() calculates the number of memory ranges for mnode and
1227 * memranges[]. Used to determine the size of page lists and mnoderanges.
1228 */
1229 int
1230 mnode_range_cnt(int mnode)
1231 {
1232 #if defined(__xpv)
1233 ASSERT(mnode == 0);
1234 return (1);
1235 #else /* __xpv */
1236 int mri;
1237 int mnrcnt = 0;
1238
1239 if (mem_node_config[mnode].exists != 0) {
1240 mri = nranges - 1;
1241
1242 /* find the memranges index below contained in mnode range */
1243
1244 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1245 mri--;
1246
1247 /*
1248 * increment mnode range counter when memranges or mnode
1249 * boundary is reached.
1250 */
1251 while (mri >= 0 &&
1252 mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1253 mnrcnt++;
1254 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1255 mri--;
1256 else
1257 break;
1258 }
1259 }
1260 ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
1261 return (mnrcnt);
1262 #endif /* __xpv */
1263 }
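
/*
 * Example: an mnode spanning pfns 0x800 (8M) through 0x180000 (6G)
 * intersects the 0-16M, 16M-2G, 2G-4G and 4G+ memranges, so
 * mnode_range_cnt() returns 4 for it.
 */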
1264
1265 /*
1266 * mnode_range_setup() initializes mnoderanges.
1267 */
1268 void
1269 mnode_range_setup(mnoderange_t *mnoderanges)
1270 {
1271 mnoderange_t *mp = mnoderanges;
1272 int mnode, mri;
1273 int mindex = 0; /* current index into mnoderanges array */
1274 int i, j;
1275 pfn_t hipfn;
1276 int last, hi;
1277
1278 for (mnode = 0; mnode < max_mem_nodes; mnode++) {
1279 if (mem_node_config[mnode].exists == 0)
1280 continue;
1281
1282 mri = nranges - 1;
1283
1284 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1285 mri--;
1286
1287 while (mri >= 0 && mem_node_config[mnode].physmax >=
1288 MEMRANGELO(mri)) {
1289 mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
1290 mem_node_config[mnode].physbase);
1291 mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
1292 mem_node_config[mnode].physmax);
1293 mnoderanges->mnr_mnode = mnode;
1294 mnoderanges->mnr_memrange = mri;
1295 mnoderanges->mnr_exists = 1;
1296 mnoderanges++;
1297 mindex++;
1298 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1299 mri--;
1300 else
1301 break;
1302 }
1303 }
1304
1305 /*
1306 * For now do a simple sort of the mnoderanges array to fill in
1307 * the mnr_next fields. Since mindex is expected to be relatively
1308 * small, we use a simple O(N^2) algorithm.
1309 */
1310 for (i = 0; i < mindex; i++) {
1311 if (mp[i].mnr_pfnlo == 0) /* find lowest */
1312 break;
1313 }
1314 ASSERT(i < mindex);
1315 last = i;
1316 mtype16m = last;
1317 mp[last].mnr_next = -1;
1318 for (i = 0; i < mindex - 1; i++) {
1319 hipfn = (pfn_t)(-1);
1320 hi = -1;
1321 /* find next highest mnode range */
1322 for (j = 0; j < mindex; j++) {
1323 if (mp[j].mnr_pfnlo > mp[last].mnr_pfnlo &&
1324 mp[j].mnr_pfnlo < hipfn) {
1325 hipfn = mp[j].mnr_pfnlo;
1326 hi = j;
1327 }
1328 }
1329 mp[hi].mnr_next = last;
1330 last = hi;
1331 }
1332 mtypetop = last;
1333 }
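
/*
 * After setup, the mnr_next links form a descending-pfn chain rooted
 * at mtypetop; with the four ranges above, walking mnr_next visits
 * 4G+, 2G-4G, 16M-2G and finally 0-16M (mtype16m), whose mnr_next
 * is -1.
 */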
1334
1335 #ifndef __xpv
1336 /*
1337 * Update mnoderanges for memory hot-add DR operations.
1338 */
1339 static void
1340 mnode_range_add(int mnode)
1341 {
1342 int *prev;
1343 int n, mri;
1344 pfn_t start, end;
1345 extern void membar_sync(void);
1346
1347 ASSERT(0 <= mnode && mnode < max_mem_nodes);
1348 ASSERT(mem_node_config[mnode].exists);
1349 start = mem_node_config[mnode].physbase;
1350 end = mem_node_config[mnode].physmax;
1351 ASSERT(start <= end);
1352 mutex_enter(&mnoderange_lock);
1353
1354 #ifdef DEBUG
1355 /* Check whether it interleaves with other memory nodes. */
1356 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1357 ASSERT(mnoderanges[n].mnr_exists);
1358 if (mnoderanges[n].mnr_mnode == mnode)
1359 continue;
1360 ASSERT(start > mnoderanges[n].mnr_pfnhi ||
1361 end < mnoderanges[n].mnr_pfnlo);
1362 }
1363 #endif /* DEBUG */
1364
1365 mri = nranges - 1;
1366 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1367 mri--;
1368 while (mri >= 0 && mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1369 /* Check whether mtype already exists. */
1370 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1371 if (mnoderanges[n].mnr_mnode == mnode &&
1372 mnoderanges[n].mnr_memrange == mri) {
1373 mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri),
1374 start);
1375 mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri),
1376 end);
1377 break;
1378 }
1379 }
1380
1381 /* Add a new entry if it doesn't exist yet. */
1382 if (n == -1) {
1383 /* Try to find an unused entry in mnoderanges array. */
1384 for (n = 0; n < mnoderangecnt; n++) {
1385 if (mnoderanges[n].mnr_exists == 0)
1386 break;
1387 }
1388 ASSERT(n < mnoderangecnt);
1389 mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri), start);
1390 mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri), end);
1391 mnoderanges[n].mnr_mnode = mnode;
1392 mnoderanges[n].mnr_memrange = mri;
1393 mnoderanges[n].mnr_exists = 1;
1394 /* Page 0 should always be present. */
1395 for (prev = &mtypetop;
1396 mnoderanges[*prev].mnr_pfnlo > start;
1397 prev = &mnoderanges[*prev].mnr_next) {
1398 ASSERT(mnoderanges[*prev].mnr_next >= 0);
1399 ASSERT(mnoderanges[*prev].mnr_pfnlo > end);
1400 }
1401 mnoderanges[n].mnr_next = *prev;
1402 membar_sync();
1403 *prev = n;
1404 }
1405
1406 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1407 mri--;
1408 else
1409 break;
1410 }
1411
1412 mutex_exit(&mnoderange_lock);
1413 }
1414
1415 /*
1416 * Update mnoderanges for memory hot-removal DR operations.
1417 */
1418 static void
1419 mnode_range_del(int mnode)
1420 {
1421 _NOTE(ARGUNUSED(mnode));
1422 ASSERT(0 <= mnode && mnode < max_mem_nodes);
1423 /* TODO: support deletion operation. */
1424 ASSERT(0);
1425 }
1426
1427 void
1428 plat_slice_add(pfn_t start, pfn_t end)
1429 {
1430 mem_node_add_slice(start, end);
1431 if (plat_dr_enabled()) {
1432 mnode_range_add(PFN_2_MEM_NODE(start));
1433 }
1434 }
1435
1436 void
1437 plat_slice_del(pfn_t start, pfn_t end)
1438 {
1439 ASSERT(PFN_2_MEM_NODE(start) == PFN_2_MEM_NODE(end));
1440 ASSERT(plat_dr_enabled());
1441 mnode_range_del(PFN_2_MEM_NODE(start));
1442 mem_node_del_slice(start, end);
1443 }
1444 #endif /* __xpv */
1445
1446 /*ARGSUSED*/
1447 int
1448 mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
1449 {
1450 int mtype = mtypetop;
1451
1452 #if !defined(__xpv)
1453 #if defined(__i386)
1454 /*
1455 * set the mtype range
1456 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
1457 * - for non kmem requests, set range to above 4g if memory below 4g
1458 * runs low.
1459 */
1460 if (restricted_kmemalloc && VN_ISKAS(vp) &&
1461 (caddr_t)(vaddr) >= kernelheap &&
1462 (caddr_t)(vaddr) < ekernelheap) {
1463 ASSERT(physmax4g);
1464 mtype = mtype4g;
1465 if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
1466 btop(pgsz), *flags)) {
1467 *flags |= PGI_MT_RANGE16M;
1468 } else {
1469 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1470 VM_STAT_COND_ADD((*flags & PG_PANIC),
1471 vmm_vmstats.pgpanicalloc);
1472 *flags |= PGI_MT_RANGE0;
1473 }
1474 return (mtype);
1475 }
1476 #endif /* __i386 */
1477
1478 if (RESTRICT4G_ALLOC) {
1479 VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
1480 /* here only for > 4g systems */
1481 *flags |= PGI_MT_RANGE4G;
1482 } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
1483 *flags |= PGI_MT_RANGE16M;
1484 } else {
1485 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1486 VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
1487 *flags |= PGI_MT_RANGE0;
1488 }
1489 #endif /* !__xpv */
1490 return (mtype);
1491 }
1492
1493
1494 /* mtype init for page_get_replacement_page */
1495 /*ARGSUSED*/
1496 int
1497 mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
1498 {
1499 int mtype = mtypetop;
1500 #if !defined(__xpv)
1501 if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
1502 *flags |= PGI_MT_RANGE16M;
1503 } else {
1504 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1505 *flags |= PGI_MT_RANGE0;
1506 }
1507 #endif
1508 return (mtype);
1509 }
1510
1511 /*
1512 * Determine if the mnode range specified in mtype contains memory belonging
1513 * to memory node mnode. If flags & PGI_MT_RANGE is set then mtype contains
1514 * the range from high pfn to 0, 16m or 4g.
1515 *
1516 * Return the first mnode range type index found, otherwise -1 if none found.
1517 */
1518 int
1519 mtype_func(int mnode, int mtype, uint_t flags)
1520 {
1521 if (flags & PGI_MT_RANGE) {
1522 int mnr_lim = MRI_0;
1523
1524 if (flags & PGI_MT_NEXT) {
1525 mtype = mnoderanges[mtype].mnr_next;
1526 }
1527 if (flags & PGI_MT_RANGE4G)
1528 mnr_lim = MRI_4G; /* exclude 0-4g range */
1529 else if (flags & PGI_MT_RANGE16M)
1530 mnr_lim = MRI_16M; /* exclude 0-16m range */
1531 while (mtype != -1 &&
1532 mnoderanges[mtype].mnr_memrange <= mnr_lim) {
1533 if (mnoderanges[mtype].mnr_mnode == mnode)
1534 return (mtype);
1535 mtype = mnoderanges[mtype].mnr_next;
1536 }
1537 } else if (mnoderanges[mtype].mnr_mnode == mnode) {
1538 return (mtype);
1539 }
1540 return (-1);
1541 }
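
/*
 * Typical usage sketch (cf. mnode_pgcnt() below): start at mtypetop
 * and pass PGI_MT_NEXT to advance, stopping at -1:
 *
 *	mtype = mtype_func(mnode, mtypetop, PGI_MT_RANGE0);
 *	while (mtype != -1)
 *		mtype = mtype_func(mnode, mtype, PGI_MT_RANGE0 | PGI_MT_NEXT);
 */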
1542
1543 /*
1544 * Update the page list max counts with the pfn range specified by the
1545 * input parameters.
1546 */
1547 void
1548 mtype_modify_max(pfn_t startpfn, long cnt)
1549 {
1550 int mtype;
1551 pgcnt_t inc;
1552 spgcnt_t scnt = (spgcnt_t)(cnt);
1553 pgcnt_t acnt = ABS(scnt);
1554 pfn_t endpfn = startpfn + acnt;
1555 pfn_t pfn, lo;
1556
1557 if (!physmax4g)
1558 return;
1559
1560 mtype = mtypetop;
1561 for (pfn = endpfn; pfn > startpfn; ) {
1562 ASSERT(mtype != -1);
1563 lo = mnoderanges[mtype].mnr_pfnlo;
1564 if (pfn > lo) {
1565 if (startpfn >= lo) {
1566 inc = pfn - startpfn;
1567 } else {
1568 inc = pfn - lo;
1569 }
1570 if (mnoderanges[mtype].mnr_memrange != MRI_4G) {
1571 if (scnt > 0)
1572 maxmem4g += inc;
1573 else
1574 maxmem4g -= inc;
1575 }
1576 pfn -= inc;
1577 }
1578 mtype = mnoderanges[mtype].mnr_next;
1579 }
1580 }
1581
1582 int
1583 mtype_2_mrange(int mtype)
1584 {
1585 return (mnoderanges[mtype].mnr_memrange);
1586 }
1587
1588 void
1589 mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
1590 {
1591 _NOTE(ARGUNUSED(mnode));
1592 ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
1593 *pfnlo = mnoderanges[mtype].mnr_pfnlo;
1594 *pfnhi = mnoderanges[mtype].mnr_pfnhi;
1595 }
1596
1597 size_t
1598 plcnt_sz(size_t ctrs_sz)
1599 {
1600 #ifdef DEBUG
1601 int szc, colors;
1602
1603 ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
1604 for (szc = 0; szc < mmu_page_sizes; szc++) {
1605 colors = page_get_pagecolors(szc);
1606 ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
1607 }
1608 #endif
1609 return (ctrs_sz);
1610 }
1611
1612 caddr_t
1613 plcnt_init(caddr_t addr)
1614 {
1615 #ifdef DEBUG
1616 int mt, szc, colors;
1617
1618 for (mt = 0; mt < mnoderangecnt; mt++) {
1619 mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
1620 addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
1621 for (szc = 0; szc < mmu_page_sizes; szc++) {
1622 colors = page_get_pagecolors(szc);
1623 mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
1624 mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
1625 (pgcnt_t *)addr;
1626 addr += (sizeof (pgcnt_t) * colors);
1627 }
1628 }
1629 #endif
1630 return (addr);
1631 }
1632
1633 void
1634 plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
1635 {
1636 _NOTE(ARGUNUSED(pp));
1637 #ifdef DEBUG
1638 int bin = PP_2_BIN(pp);
1639
1640 atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
1641 atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
1642 cnt);
1643 #endif
1644 ASSERT(mtype == PP_2_MTYPE(pp));
1645 if (physmax4g && mnoderanges[mtype].mnr_memrange != MRI_4G)
1646 atomic_add_long(&freemem4g, cnt);
1647 if (flags & PG_CACHE_LIST)
1648 atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
1649 else
1650 atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
1651 atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
1652 }
1653
1654 /*
1655 * Returns the free page count for mnode
1656 */
1657 int
1658 mnode_pgcnt(int mnode)
1659 {
1660 int mtype = mtypetop;
1661 int flags = PGI_MT_RANGE0;
1662 pgcnt_t pgcnt = 0;
1663
1664 mtype = mtype_func(mnode, mtype, flags);
1665
1666 while (mtype != -1) {
1667 pgcnt += MTYPE_FREEMEM(mtype);
1668 mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
1669 }
1670 return (pgcnt);
1671 }
1672
1673 /*
1674 * Initialize page coloring variables based on the l2 cache parameters.
1675 * Calculate and return memory needed for page coloring data structures.
1676 */
1677 size_t
1678 page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
1679 {
1680 _NOTE(ARGUNUSED(l2_linesz));
1681 size_t colorsz = 0;
1682 int i;
1683 int colors;
1684
1685 #if defined(__xpv)
1686 /*
1687 * Hypervisor domains currently don't have any concept of NUMA.
1688 * Hence we'll act like there is only 1 memrange.
1689 */
1690 i = memrange_num(1);
1691 #else /* !__xpv */
1692 /*
1693 * Reduce the memory ranges lists if we don't have large amounts
1694 * of memory. This avoids searching known empty free lists.
1695 * To support memory DR operations, we need to keep memory ranges
1696 * for possible memory hot-add operations.
1697 */
1698 if (plat_dr_physmax > physmax)
1699 i = memrange_num(plat_dr_physmax);
1700 else
1701 i = memrange_num(physmax);
1702 #if defined(__i386)
1703 if (i > MRI_4G)
1704 restricted_kmemalloc = 0;
1705 #endif
1706 /* physmax greater than 4g */
1707 if (i == MRI_4G)
1708 physmax4g = 1;
1709 #endif /* !__xpv */
1710 memranges += i;
1711 nranges -= i;
1712
1713 ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);
1714
1715 ASSERT(ISP2(l2_linesz));
1716 ASSERT(l2_sz > MMU_PAGESIZE);
1717
1718 /* l2_assoc is 0 for fully associative l2 cache */
1719 if (l2_assoc)
1720 l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
1721 else
1722 l2_colors = 1;
1723
1724 ASSERT(ISP2(l2_colors));
1725
1726 /* for scalability, configure at least PAGE_COLORS_MIN color bins */
1727 page_colors = MAX(l2_colors, PAGE_COLORS_MIN);
1728
1729 /*
1730 * cpu_page_colors is non-zero when a page color may be spread across
1731 * multiple bins.
1732 */
1733 if (l2_colors < page_colors)
1734 cpu_page_colors = l2_colors;
1735
1736 ASSERT(ISP2(page_colors));
1737
1738 page_colors_mask = page_colors - 1;
1739
1740 ASSERT(ISP2(CPUSETSIZE()));
1741 page_coloring_shift = lowbit(CPUSETSIZE());
1742
1743 /* initialize number of colors per page size */
1744 for (i = 0; i <= mmu.max_page_level; i++) {
1745 hw_page_array[i].hp_size = LEVEL_SIZE(i);
1746 hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
1747 hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
1748 hw_page_array[i].hp_colors = (page_colors_mask >>
1749 (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
1750 + 1;
1751 colorequivszc[i] = 0;
1752 }
1753
1754 /*
1755 * The value of cpu_page_colors determines if additional color bins
1756 * need to be checked for a particular color in the page_get routines.
1757 */
1758 if (cpu_page_colors != 0) {
1759
1760 int a = lowbit(page_colors) - lowbit(cpu_page_colors);
1761 ASSERT(a > 0);
1762 ASSERT(a < 16);
1763
1764 for (i = 0; i <= mmu.max_page_level; i++) {
1765 if ((colors = hw_page_array[i].hp_colors) <= 1) {
1766 colorequivszc[i] = 0;
1767 continue;
1768 }
1769 while ((colors >> a) == 0)
1770 a--;
1771 ASSERT(a >= 0);
1772
1773 /* the upper 4 bits encode the color equiv mask */
1774 colorequivszc[i] = (a << 4);
1775 }
1776 }
1777
1778 /* factor in colorequiv to check additional 'equivalent' bins. */
1779 if (colorequiv > 1) {
1780
1781 int a = lowbit(colorequiv) - 1;
1782 if (a > 15)
1783 a = 15;
1784
1785 for (i = 0; i <= mmu.max_page_level; i++) {
1786 if ((colors = hw_page_array[i].hp_colors) <= 1) {
1787 continue;
1788 }
1789 while ((colors >> a) == 0)
1790 a--;
1791 if ((a << 4) > colorequivszc[i]) {
1792 colorequivszc[i] = (a << 4);
1793 }
1794 }
1795 }
1796
1797 /* size for mnoderanges */
1798 for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
1799 mnoderangecnt += mnode_range_cnt(i);
1800 if (plat_dr_support_memory()) {
1801 /*
1802 * Reserve enough space for memory DR operations.
1803 * Two extra mnoderanges for possible fragmentation:
1804 * one for the 2G boundary and the other for the 4G boundary.
1805 * We don't expect a memory board crossing the 16M boundary
1806 * for memory hot-add operations on x86 platforms.
1807 */
1808 mnoderangecnt += 2 + max_mem_nodes - lgrp_plat_node_cnt;
1809 }
1810 colorsz = mnoderangecnt * sizeof (mnoderange_t);
1811
1812 /* size for fpc_mutex and cpc_mutex */
1813 colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);
1814
1815 /* size of page_freelists */
1816 colorsz += mnoderangecnt * sizeof (page_t ***);
1817 colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);
1818
1819 for (i = 0; i < mmu_page_sizes; i++) {
1820 colors = page_get_pagecolors(i);
1821 colorsz += mnoderangecnt * colors * sizeof (page_t *);
1822 }
1823
1824 /* size of page_cachelists */
1825 colorsz += mnoderangecnt * sizeof (page_t **);
1826 colorsz += mnoderangecnt * page_colors * sizeof (page_t *);
1827
1828 return (colorsz);
1829 }
1830
1831 /*
1832 * Called once at startup to configure page_coloring data structures and
1833 * does the 1st page_free()/page_freelist_add().
1834 */
1835 void
1836 page_coloring_setup(caddr_t pcmemaddr)
1837 {
1838 int i;
1839 int j;
1840 int k;
1841 caddr_t addr;
1842 int colors;
1843
1844 /*
1845 * do page coloring setup
1846 */
1847 addr = pcmemaddr;
1848
1849 mnoderanges = (mnoderange_t *)addr;
1850 addr += (mnoderangecnt * sizeof (mnoderange_t));
1851
1852 mnode_range_setup(mnoderanges);
1853
1854 if (physmax4g)
1855 mtype4g = pfn_2_mtype(0xfffff);
1856
1857 for (k = 0; k < NPC_MUTEX; k++) {
1858 fpc_mutex[k] = (kmutex_t *)addr;
1859 addr += (max_mem_nodes * sizeof (kmutex_t));
1860 }
1861 for (k = 0; k < NPC_MUTEX; k++) {
1862 cpc_mutex[k] = (kmutex_t *)addr;
1863 addr += (max_mem_nodes * sizeof (kmutex_t));
1864 }
1865 page_freelists = (page_t ****)addr;
1866 addr += (mnoderangecnt * sizeof (page_t ***));
1867
1868 page_cachelists = (page_t ***)addr;
1869 addr += (mnoderangecnt * sizeof (page_t **));
1870
1871 for (i = 0; i < mnoderangecnt; i++) {
1872 page_freelists[i] = (page_t ***)addr;
1873 addr += (mmu_page_sizes * sizeof (page_t **));
1874
1875 for (j = 0; j < mmu_page_sizes; j++) {
1876 colors = page_get_pagecolors(j);
1877 page_freelists[i][j] = (page_t **)addr;
1878 addr += (colors * sizeof (page_t *));
1879 }
1880 page_cachelists[i] = (page_t **)addr;
1881 addr += (page_colors * sizeof (page_t *));
1882 }
1883 }
1884
1885 #if defined(__xpv)
1886 /*
1887 * Give back 10% of the io_pool pages to the free list.
1888 * Don't shrink the pool below some absolute minimum.
1889 */
1890 static void
1891 page_io_pool_shrink()
1892 {
1893 int retcnt;
1894 page_t *pp, *pp_first, *pp_last, **curpool;
1895 mfn_t mfn;
1896 int bothpools = 0;
1897
1898 mutex_enter(&io_pool_lock);
1899 io_pool_shrink_attempts++; /* should be a kstat? */
1900 retcnt = io_pool_cnt / 10;
1901 if (io_pool_cnt - retcnt < io_pool_cnt_min)
1902 retcnt = io_pool_cnt - io_pool_cnt_min;
1903 if (retcnt <= 0)
1904 goto done;
1905 io_pool_shrinks++; /* should be a kstat? */
1906 curpool = &io_pool_4g;
1907 domore:
1908 /*
1909 * Loop through taking pages from the end of the list
1910 * (highest mfns) until the amount to return is reached.
1911 */
1912 for (pp = *curpool; pp && retcnt > 0; ) {
1913 pp_first = pp_last = pp->p_prev;
1914 if (pp_first == *curpool)
1915 break;
1916 retcnt--;
1917 io_pool_cnt--;
1918 page_io_pool_sub(curpool, pp_first, pp_last);
1919 if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
1920 start_mfn = mfn;
1921 page_free(pp_first, 1);
1922 pp = *curpool;
1923 }
1924 if (retcnt != 0 && !bothpools) {
1925 /*
1926 * If not enough pages were found in the less constrained
1927 * pool, try the more constrained one.
1928 */
1929 curpool = &io_pool_16m;
1930 bothpools = 1;
1931 goto domore;
1932 }
1933 done:
1934 mutex_exit(&io_pool_lock);
1935 }
1936
1937 #endif /* __xpv */
1938
1939 uint_t
1940 page_create_update_flags_x86(uint_t flags)
1941 {
1942 #if defined(__xpv)
1943 /*
1944 * Check whether this is an urgent allocation and free pages are depleted.
1945 */
1946 if (!(flags & PG_WAIT) && freemem < desfree)
1947 page_io_pool_shrink();
1948 #else /* !__xpv */
1949 /*
1950 * page_create_get_something may call this because 4g memory may be
1951 * depleted. Set flags to allow for relocation of base page below
1952 * 4g if necessary.
1953 */
1954 if (physmax4g)
1955 flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
1956 #endif /* __xpv */
1957 return (flags);
1958 }
1959
1960 /*ARGSUSED*/
1961 int
1962 bp_color(struct buf *bp)
1963 {
1964 return (0);
1965 }
1966
1967 #if defined(__xpv)
1968
1969 /*
1970 * Take pages out of an io_pool
1971 */
1972 static void
1973 page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
1974 {
1975 if (*poolp == pp_first) {
1976 *poolp = pp_last->p_next;
1977 if (*poolp == pp_first)
1978 *poolp = NULL;
1979 }
1980 pp_first->p_prev->p_next = pp_last->p_next;
1981 pp_last->p_next->p_prev = pp_first->p_prev;
1982 pp_first->p_prev = pp_last;
1983 pp_last->p_next = pp_first;
1984 }
1985
1986 /*
1987 * Put a page on the io_pool list. The list is ordered by increasing MFN.
1988 */
1989 static void
1990 page_io_pool_add(page_t **poolp, page_t *pp)
1991 {
1992 page_t *look;
1993 mfn_t mfn = mfn_list[pp->p_pagenum];
1994
1995 if (*poolp == NULL) {
1996 *poolp = pp;
1997 pp->p_next = pp;
1998 pp->p_prev = pp;
1999 return;
2000 }
2001
2002 /*
2003 * Since we try to take pages from the high end of the pool
2004 * chances are good that the pages to be put on the list will
2005 * go at or near the end of the list. so start at the end and
2006 * work backwards.
2007 */
2008 look = (*poolp)->p_prev;
2009 while (mfn < mfn_list[look->p_pagenum]) {
2010 look = look->p_prev;
2011 if (look == (*poolp)->p_prev)
2012 break; /* backed all the way to front of list */
2013 }
2014
2015 /* insert after look */
2016 pp->p_prev = look;
2017 pp->p_next = look->p_next;
2018 pp->p_next->p_prev = pp;
2019 look->p_next = pp;
2020 if (mfn < mfn_list[(*poolp)->p_pagenum]) {
2021 /*
2022 * we inserted a new first list element
2023 * adjust pool pointer to newly inserted element
2024 */
2025 *poolp = pp;
2026 }
2027 }
2028
/*
 * Add a page to the io_pool. Setting the force flag will force the page
 * into the io_pool no matter what.
 */
static void
add_page_to_pool(page_t *pp, int force)
{
	page_t *highest;
	page_t *freep = NULL;

	mutex_enter(&io_pool_lock);
	/*
	 * Always keep the scarce low memory pages
	 */
	if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
		++io_pool_cnt;
		page_io_pool_add(&io_pool_16m, pp);
		goto done;
	}
	if (io_pool_cnt < io_pool_cnt_max || force || io_pool_4g == NULL) {
		++io_pool_cnt;
		page_io_pool_add(&io_pool_4g, pp);
	} else {
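		/*
		 * The pool is full: keep the new page only if its mfn is
		 * lower than the highest mfn currently in the pool,
		 * evicting that highest page; otherwise free the new page.
		 */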
		highest = io_pool_4g->p_prev;
		if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
			page_io_pool_sub(&io_pool_4g, highest, highest);
			page_io_pool_add(&io_pool_4g, pp);
			freep = highest;
		} else {
			freep = pp;
		}
	}
done:
	mutex_exit(&io_pool_lock);
	if (freep)
		page_free(freep, 1);
}


int contig_pfn_cnt;	/* number of pfns in the contig pfn list */
int contig_pfn_max;	/* capacity of the contig pfn list */
int next_alloc_pfn;	/* next position in list to start a contig search */
int contig_pfnlist_updates;	/* pfn list update count */
int contig_pfnlist_builds;	/* how many times have we (re)built list */
int contig_pfnlist_buildfailed;	/* how many times has list build failed */
int create_contig_pending;	/* nonzero means taskq creating contig list */
pfn_t *contig_pfn_list = NULL;	/* list of contig pfns in ascending mfn order */

/*
 * Function to use in sorting a list of pfns by their underlying mfns.
 */
static int
mfn_compare(const void *pfnp1, const void *pfnp2)
{
	mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
	mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];

	if (mfn1 > mfn2)
		return (1);
	if (mfn1 < mfn2)
		return (-1);
	return (0);
}

/*
 * Compact the contig_pfn_list by tossing all the non-contiguous
 * elements from the list.
 */
static void
compact_contig_pfn_list(void)
{
	pfn_t pfn, lapfn, prev_lapfn;
	mfn_t mfn;
	int i, newcnt = 0;

	prev_lapfn = 0;
	for (i = 0; i < contig_pfn_cnt - 1; i++) {
		pfn = contig_pfn_list[i];
		lapfn = contig_pfn_list[i + 1];
		mfn = mfn_list[pfn];
		/*
		 * See if next pfn is for a contig mfn
		 */
		if (mfn_list[lapfn] != mfn + 1)
			continue;
		/*
		 * pfn and lookahead are both put in list
		 * unless pfn is the previous lookahead.
		 */
		if (pfn != prev_lapfn)
			contig_pfn_list[newcnt++] = pfn;
		contig_pfn_list[newcnt++] = lapfn;
		prev_lapfn = lapfn;
	}
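	/*
	 * Zero the tail entries that are no longer part of the
	 * compacted list.
	 */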
	for (i = newcnt; i < contig_pfn_cnt; i++)
		contig_pfn_list[i] = 0;
	contig_pfn_cnt = newcnt;
}

/*ARGSUSED*/
static void
call_create_contiglist(void *arg)
{
	(void) create_contig_pfnlist(PG_WAIT);
}

/*
 * Create a list of freelist pfns that have underlying
 * contiguous mfns.  The list is kept in ascending mfn order.
 * Returns 1 if the list was created, else 0.
 */
static int
create_contig_pfnlist(uint_t flags)
{
	pfn_t pfn;
	page_t *pp;
	int ret = 1;

	mutex_enter(&contig_list_lock);
	if (contig_pfn_list != NULL)
		goto out;
	contig_pfn_max = freemem + (freemem / 10);
	contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
	    (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
	if (contig_pfn_list == NULL) {
		/*
		 * If we could not create the contig list (because
		 * we could not sleep for memory), dispatch a taskq that
		 * can sleep to get the memory.
		 */
		if (!create_contig_pending) {
			if (taskq_dispatch(system_taskq, call_create_contiglist,
			    NULL, TQ_NOSLEEP) != NULL)
				create_contig_pending = 1;
		}
		contig_pfnlist_buildfailed++;	/* count list build failures */
		ret = 0;
		goto out;
	}
	create_contig_pending = 0;
	ASSERT(contig_pfn_cnt == 0);
	for (pfn = 0; pfn < mfn_count; pfn++) {
		pp = page_numtopp_nolock(pfn);
		if (pp == NULL || !PP_ISFREE(pp))
			continue;
		contig_pfn_list[contig_pfn_cnt] = pfn;
		if (++contig_pfn_cnt == contig_pfn_max)
			break;
	}
	/*
	 * Sanity check the new list.
	 */
	if (contig_pfn_cnt < 2) { /* no contig pfns */
		contig_pfn_cnt = 0;
		contig_pfnlist_buildfailed++;
		kmem_free(contig_pfn_list, contig_pfn_max * sizeof (pfn_t));
		contig_pfn_list = NULL;
		contig_pfn_max = 0;
		ret = 0;
		goto out;
	}
	qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
	compact_contig_pfn_list();
	/*
	 * Make sure the next search of the newly created contiguous pfn
	 * list starts at the beginning of the list.
	 */
	next_alloc_pfn = 0;
	contig_pfnlist_builds++;	/* count list builds */
out:
	mutex_exit(&contig_list_lock);
	return (ret);
}


/*
 * Toss the current contig pfnlist.  Someone is about to do a massive
 * update to pfn<->mfn mappings, so we have them destroy the list and
 * hold the lock until they are done with their update.
 */
void
clear_and_lock_contig_pfnlist()
{
	pfn_t *listp = NULL;
	size_t listsize;

	mutex_enter(&contig_list_lock);
	if (contig_pfn_list != NULL) {
		listp = contig_pfn_list;
		listsize = contig_pfn_max * sizeof (pfn_t);
		contig_pfn_list = NULL;
		contig_pfn_max = contig_pfn_cnt = 0;
	}
	if (listp != NULL)
		kmem_free(listp, listsize);
}

/*
 * Unlock the contig_pfn_list.  The next attempted use of it will cause
 * it to be re-created.
 */
void
unlock_contig_pfnlist()
{
	mutex_exit(&contig_list_lock);
}

/*
 * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
 */
void
update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
{
	int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
	pfn_t probe_pfn;
	mfn_t probe_mfn;
	int drop_lock = 0;

	if (mutex_owner(&contig_list_lock) != curthread) {
		drop_lock = 1;
		mutex_enter(&contig_list_lock);
	}
	if (contig_pfn_list == NULL)
		goto done;
	contig_pfnlist_updates++;
	/*
	 * Find the pfn in the current list.  Use a binary chop to locate it.
	 */
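	/*
	 * The list is sorted by mfn, so steer the chop using oldmfn,
	 * the mfn the pfn mapped to when it was inserted.
	 */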
	probe_hi = contig_pfn_cnt - 1;
	probe_lo = 0;
	probe_pos = (probe_hi + probe_lo) / 2;
	while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
		if (probe_pos == probe_lo) { /* pfn not in list */
			probe_pos = -1;
			break;
		}
		if (pfn_to_mfn(probe_pfn) <= oldmfn)
			probe_lo = probe_pos;
		else
			probe_hi = probe_pos;
		probe_pos = (probe_hi + probe_lo) / 2;
	}
	if (probe_pos >= 0) {
		/*
		 * Remove pfn from list and ensure next alloc
		 * position stays in bounds.
		 */
		if (--contig_pfn_cnt <= next_alloc_pfn)
			next_alloc_pfn = 0;
		if (contig_pfn_cnt < 2) { /* no contig pfns */
			contig_pfn_cnt = 0;
			kmem_free(contig_pfn_list,
			    contig_pfn_max * sizeof (pfn_t));
			contig_pfn_list = NULL;
			contig_pfn_max = 0;
			goto done;
		}
		ovbcopy(&contig_pfn_list[probe_pos + 1],
		    &contig_pfn_list[probe_pos],
		    (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
	}
	if (newmfn == MFN_INVALID)
		goto done;
	/*
	 * Check if new mfn has adjacent mfns in the list
	 */
	probe_hi = contig_pfn_cnt - 1;
	probe_lo = 0;
	insert_after = -2;
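	/*
	 * insert_after stays -2 until an adjacent mfn (newmfn +/- 1)
	 * is found; it then holds the index to insert after.
	 */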
	do {
		probe_pos = (probe_hi + probe_lo) / 2;
		probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
		if (newmfn == probe_mfn + 1)
			insert_after = probe_pos;
		else if (newmfn == probe_mfn - 1)
			insert_after = probe_pos - 1;
		if (probe_pos == probe_lo)
			break;
		if (probe_mfn <= newmfn)
			probe_lo = probe_pos;
		else
			probe_hi = probe_pos;
	} while (insert_after == -2);
	/*
	 * If there is space in the list and there are adjacent mfns,
	 * insert the pfn into its proper place in the list.
	 */
	if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
		insert_point = insert_after + 1;
		ovbcopy(&contig_pfn_list[insert_point],
		    &contig_pfn_list[insert_point + 1],
		    (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
		contig_pfn_list[insert_point] = pfn;
		contig_pfn_cnt++;
	}
done:
	if (drop_lock)
		mutex_exit(&contig_list_lock);
}

/*
 * Called to (re-)populate the io_pool from the free page lists.
 */
long
populate_io_pool(void)
{
	pfn_t pfn;
	mfn_t mfn, max_mfn;
	page_t *pp;

	/*
	 * Figure out the bounds of the pool on first invocation.
	 * We use a percentage of memory for the io pool size;
	 * we allow that to shrink, but not below a fixed minimum.
	 */
	if (io_pool_cnt_max == 0) {
		io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
		io_pool_cnt_lowater = io_pool_cnt_max;
		/*
		 * This is the first time in populate_io_pool, grab a va to use
		 * when we need to allocate pages.
		 */
		io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
	}
	/*
	 * If we are out of pages in the pool, then grow the size of the pool.
	 */
	if (io_pool_cnt == 0) {
		/*
		 * Grow the max size of the io pool by 5%, but never more than
		 * 25% of physical memory.
		 */
		if (io_pool_cnt_max < physmem / 4)
			io_pool_cnt_max += io_pool_cnt_max / 20;
	}
	io_pool_grows++;	/* should be a kstat? */

	/*
	 * Get the highest mfn on this platform, but limit it to the
	 * 32 bit DMA max.
	 */
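	/*
	 * The mfn_to_pfn() call below is made for its side effect of
	 * refreshing cached_max_mfn, which is used just after it.
	 */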
	(void) mfn_to_pfn(start_mfn);
	max_mfn = MIN(cached_max_mfn, PFN_4GIG);
	for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
		pfn = mfn_to_pfn(mfn);
		if (pfn & PFN_IS_FOREIGN_MFN)
			continue;
		/*
		 * try to allocate it from free pages
		 */
		pp = page_numtopp_alloc(pfn);
		if (pp == NULL)
			continue;
		PP_CLRFREE(pp);
		add_page_to_pool(pp, 1);
		if (io_pool_cnt >= io_pool_cnt_max)
			break;
	}

	return (io_pool_cnt);
}

/*
 * Destroy a page that was being used for DMA I/O. It may or
 * may not actually go back to the io_pool.
 */
void
page_destroy_io(page_t *pp)
{
	mfn_t mfn = mfn_list[pp->p_pagenum];

	/*
	 * When the page was alloc'd a reservation was made, release it now
	 */
	page_unresv(1);
	/*
	 * Unload translations, if any, then hash out the
	 * page to erase its identity.
	 */
	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	page_hashout(pp, NULL);

	/*
	 * If the page came from the free lists, just put it back to them.
	 * DomU pages always go on the free lists as well.
	 */
	if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
		page_free(pp, 1);
		return;
	}

	add_page_to_pool(pp, 0);
}


long contig_searches;		/* count of times contig pages requested */
long contig_search_restarts;	/* count of contig ranges tried */
long contig_search_failed;	/* count of contig alloc failures */

/*
 * Free partial page list
 */
static void
free_partial_list(page_t **pplist)
{
	page_t *pp;

	while (*pplist != NULL) {
		pp = *pplist;
		page_io_pool_sub(pplist, pp, pp);
		page_free(pp, 1);
	}
}

/*
 * Look through the contiguous pfns that are not part of the io_pool for
 * contiguous free pages.  Return a list of the found pages or NULL.
 */
page_t *
find_contig_free(uint_t npages, uint_t flags, uint64_t pfnseg,
    pgcnt_t pfnalign)
{
	page_t *pp, *plist = NULL;
	mfn_t mfn, prev_mfn, start_mfn;
	pfn_t pfn;
	int pages_needed, pages_requested;
	int search_start;

	/*
	 * Create the contig pfn list if not already done.
	 */
retry:
	mutex_enter(&contig_list_lock);
	if (contig_pfn_list == NULL) {
		mutex_exit(&contig_list_lock);
		if (!create_contig_pfnlist(flags)) {
			return (NULL);
		}
		goto retry;
	}
	contig_searches++;
	/*
	 * Search the contiguous pfn list for physically contiguous pages
	 * not in the io_pool.  Start the search where the last search
	 * left off.
	 */
	pages_requested = pages_needed = npages;
	search_start = next_alloc_pfn;
	start_mfn = prev_mfn = 0;
	while (pages_needed) {
		pfn = contig_pfn_list[next_alloc_pfn];
		mfn = pfn_to_mfn(pfn);
		/*
		 * Check if the mfn is the first one or contig to the
		 * previous one, that the page corresponding to the mfn
		 * is free, and that the mfn range is not crossing a
		 * segment boundary.
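		 * (A run crosses a boundary when (mfn & pfnseg) falls
		 * below (start_mfn & pfnseg).)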
		 */
		if ((prev_mfn == 0 || mfn == prev_mfn + 1) &&
		    (pp = page_numtopp_alloc(pfn)) != NULL &&
		    !((mfn & pfnseg) < (start_mfn & pfnseg))) {
			PP_CLRFREE(pp);
			page_io_pool_add(&plist, pp);
			pages_needed--;
			if (prev_mfn == 0) {
				if (pfnalign &&
				    mfn != P2ROUNDUP(mfn, pfnalign)) {
					/*
					 * not properly aligned
					 */
					contig_search_restarts++;
					free_partial_list(&plist);
					pages_needed = pages_requested;
					start_mfn = prev_mfn = 0;
					goto skip;
				}
				start_mfn = mfn;
			}
			prev_mfn = mfn;
		} else {
			contig_search_restarts++;
			free_partial_list(&plist);
			pages_needed = pages_requested;
			start_mfn = prev_mfn = 0;
		}
skip:
		if (++next_alloc_pfn == contig_pfn_cnt)
			next_alloc_pfn = 0;
		if (next_alloc_pfn == search_start)
			break; /* all pfns searched */
	}
	mutex_exit(&contig_list_lock);
	if (pages_needed) {
		contig_search_failed++;
		/*
		 * Failed to find enough contig pages.
		 * Free the partial page list.
		 */
		free_partial_list(&plist);
	}
	return (plist);
}

/*
 * Search the reserved io pool pages for a page range with the
 * desired characteristics.
 */
page_t *
page_io_pool_alloc(ddi_dma_attr_t *mattr, int contig, pgcnt_t minctg)
{
	page_t *pp_first, *pp_last;
	page_t *pp, **poolp;
	pgcnt_t nwanted, pfnalign;
	uint64_t pfnseg;
	mfn_t mfn, tmfn, hi_mfn, lo_mfn;
	int align, attempt = 0;

	if (minctg == 1)
		contig = 0;
	lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
	hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
	pfnseg = mmu_btop(mattr->dma_attr_seg);
	align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
	if (align > MMU_PAGESIZE)
		pfnalign = mmu_btop(align);
	else
		pfnalign = 0;

try_again:
	/*
	 * See if we want pages for a legacy device
	 */
	if (hi_mfn < PFN_16MEG)
		poolp = &io_pool_16m;
	else
		poolp = &io_pool_4g;
try_smaller:
	/*
	 * Take pages from I/O pool. We'll use pages from the highest
	 * MFN range possible.
	 */
	pp_first = pp_last = NULL;
	mutex_enter(&io_pool_lock);
	nwanted = minctg;
	for (pp = *poolp; pp && nwanted > 0; ) {
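		/*
		 * Walk backwards from the tail (the list head's
		 * predecessor), i.e. highest mfn first.
		 */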
		pp = pp->p_prev;

		/*
		 * skip pages above allowable range
		 */
		mfn = mfn_list[pp->p_pagenum];
		if (hi_mfn < mfn)
			goto skip;

		/*
		 * stop at pages below allowable range
		 */
		if (lo_mfn > mfn)
			break;
restart:
		if (pp_last == NULL) {
			/*
			 * Check alignment
			 */
			tmfn = mfn - (minctg - 1);
			if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
				goto skip; /* not properly aligned */
			/*
			 * Check segment
			 */
			if ((mfn & pfnseg) < (tmfn & pfnseg))
				goto skip; /* crosses seg boundary */
			/*
			 * Start building page list
			 */
			pp_first = pp_last = pp;
			nwanted--;
		} else {
			/*
			 * check physical contiguity if required
			 */
			if (contig &&
			    mfn_list[pp_first->p_pagenum] != mfn + 1) {
				/*
				 * not a contiguous page, restart list.
				 */
				pp_last = NULL;
				nwanted = minctg;
				goto restart;
			} else { /* add page to list */
				pp_first = pp;
				nwanted--;
			}
		}
skip:
		if (pp == *poolp)
			break;
	}

	/*
	 * If we didn't find enough memory, try the more constrained pool,
	 * then sweep free pages into the DMA pool and try again.
	 */
	if (nwanted != 0) {
		mutex_exit(&io_pool_lock);
		/*
		 * If we were looking in the less constrained pool and
		 * didn't find pages, try the more constrained pool.
		 */
		if (poolp == &io_pool_4g) {
			poolp = &io_pool_16m;
			goto try_smaller;
		}
		kmem_reap();
		if (++attempt < 4) {
			/*
			 * Grab some more io_pool pages
			 */
			(void) populate_io_pool();
			goto try_again; /* go around and retry */
		}
		return (NULL);
	}
	/*
	 * Found the pages, now snip them from the list
	 */
	page_io_pool_sub(poolp, pp_first, pp_last);
	io_pool_cnt -= minctg;
	/*
	 * reset low water mark
	 */
	if (io_pool_cnt < io_pool_cnt_lowater)
		io_pool_cnt_lowater = io_pool_cnt;
	mutex_exit(&io_pool_lock);
	return (pp_first);
}

page_t *
page_swap_with_hypervisor(struct vnode *vp, u_offset_t off, caddr_t vaddr,
    ddi_dma_attr_t *mattr, uint_t flags, pgcnt_t minctg)
{
	uint_t kflags;
	int order, extra, extpages, i, contig, nbits, extents;
	page_t *pp, *expp, *pp_first, **pplist = NULL;
	mfn_t *mfnlist = NULL;

	contig = flags & PG_PHYSCONTIG;
	if (minctg == 1)
		contig = 0;
	flags &= ~PG_PHYSCONTIG;
	kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP;
	/*
	 * The hypervisor will allocate extents; if we want contig
	 * pages, the extent must be >= minctg.
	 */
	if (contig) {
		order = highbit(minctg) - 1;
		if (minctg & ((1 << order) - 1))
			order++;
		extpages = 1 << order;
	} else {
		order = 0;
		extpages = minctg;
	}
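	/*
	 * When a contiguous extent is required, extpages is minctg
	 * rounded up to a power of two; the excess pages are returned
	 * to the free list after a successful exchange.
	 */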
	if (extpages > minctg) {
		extra = extpages - minctg;
		if (!page_resv(extra, kflags))
			return (NULL);
	}
	pp_first = NULL;
	pplist = kmem_alloc(extpages * sizeof (page_t *), kflags);
	if (pplist == NULL)
		goto balloon_fail;
	mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags);
	if (mfnlist == NULL)
		goto balloon_fail;
	pp = page_create_va(vp, off, minctg * PAGESIZE, flags, &kvseg, vaddr);
	if (pp == NULL)
		goto balloon_fail;
	pp_first = pp;
	if (extpages > minctg) {
		/*
		 * fill out the rest of extent pages to swap
		 * with the hypervisor
		 */
		for (i = 0; i < extra; i++) {
			expp = page_create_va(vp,
			    (u_offset_t)(uintptr_t)io_pool_kva,
			    PAGESIZE, flags, &kvseg, io_pool_kva);
			if (expp == NULL)
				goto balloon_fail;
			(void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD);
			page_io_unlock(expp);
			page_hashout(expp, NULL);
			page_io_lock(expp);
			/*
			 * add page to end of list
			 */
			expp->p_prev = pp_first->p_prev;
			expp->p_next = pp_first;
			expp->p_prev->p_next = expp;
			pp_first->p_prev = expp;
		}

	}
	for (i = 0; i < extpages; i++) {
		pplist[i] = pp;
		pp = pp->p_next;
	}
	nbits = highbit(mattr->dma_attr_addr_hi);
	extents = contig ? 1 : minctg;
	if (balloon_replace_pages(extents, pplist, nbits, order,
	    mfnlist) != extents) {
		if (ioalloc_dbg)
			cmn_err(CE_NOTE, "request to hypervisor"
			    " for %d pages, maxaddr %" PRIx64 " failed",
			    extpages, mattr->dma_attr_addr_hi);
		goto balloon_fail;
	}

	kmem_free(pplist, extpages * sizeof (page_t *));
	kmem_free(mfnlist, extpages * sizeof (mfn_t));
	/*
	 * Return any excess pages to free list
	 */
	if (extpages > minctg) {
		for (i = 0; i < extra; i++) {
			pp = pp_first->p_prev;
			page_sub(&pp_first, pp);
			page_io_unlock(pp);
			page_unresv(1);
			page_free(pp, 1);
		}
	}
	return (pp_first);
balloon_fail:
	/*
	 * Return pages to free list and return failure
	 */
	while (pp_first != NULL) {
		pp = pp_first;
		page_sub(&pp_first, pp);
		page_io_unlock(pp);
		if (pp->p_vnode != NULL)
			page_hashout(pp, NULL);
		page_free(pp, 1);
	}
	if (pplist)
		kmem_free(pplist, extpages * sizeof (page_t *));
	if (mfnlist)
		kmem_free(mfnlist, extpages * sizeof (mfn_t));
	page_unresv(extpages - minctg);
	return (NULL);
}

static void
return_partial_alloc(page_t *plist)
{
	page_t *pp;

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		page_destroy_io(pp);
	}
}

static page_t *
page_get_contigpages(
	struct vnode *vp,
	u_offset_t off,
	int *npagesp,
	uint_t flags,
	caddr_t vaddr,
	ddi_dma_attr_t *mattr)
{
	mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
	page_t *plist;	/* list to return */
	page_t *pp, *mcpl;
	int contig, anyaddr, npages, getone = 0;
	mfn_t lo_mfn;
	mfn_t hi_mfn;
	pgcnt_t pfnalign = 0;
	int align, sgllen;
	uint64_t pfnseg;
	pgcnt_t minctg;

	npages = *npagesp;
	ASSERT(mattr != NULL);
	lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
	hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
	sgllen = mattr->dma_attr_sgllen;
	pfnseg = mmu_btop(mattr->dma_attr_seg);
	align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
	if (align > MMU_PAGESIZE)
		pfnalign = mmu_btop(align);

	contig = flags & PG_PHYSCONTIG;
	if (npages == -1) {
		npages = 1;
		pfnalign = 0;
	}
	/*
	 * Clear the contig flag if only one page is needed.
	 */
	if (npages == 1) {
		getone = 1;
		contig = 0;
	}

	/*
	 * Check if any page in the system is fine.
	 */
	anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn;
	if (!contig && anyaddr && !pfnalign) {
		flags &= ~PG_PHYSCONTIG;
		plist = page_create_va(vp, off, npages * MMU_PAGESIZE,
		    flags, &kvseg, vaddr);
		if (plist != NULL) {
			*npagesp = 0;
			return (plist);
		}
	}
	plist = NULL;
	minctg = howmany(npages, sgllen);
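	/*
	 * Split the request into sgllen runs of
	 * minctg = howmany(npages, sgllen) contiguous pages each.
	 */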
	while (npages > sgllen || getone) {
		if (minctg > npages)
			minctg = npages;
		mcpl = NULL;
		/*
		 * We could want contig pages with no address range limits.
		 */
		if (anyaddr && contig) {
			/*
			 * Look for free contig pages to satisfy the request.
			 */
			mcpl = find_contig_free(minctg, flags, pfnseg,
			    pfnalign);
		}
		/*
		 * Try the reserved io pools next
		 */
		if (mcpl == NULL)
			mcpl = page_io_pool_alloc(mattr, contig, minctg);
		if (mcpl != NULL) {
			pp = mcpl;
			do {
				if (!page_hashin(pp, vp, off, NULL)) {
					panic("page_get_contigpages:"
					    " hashin failed"
					    " pp %p, vp %p, off %llx",
					    (void *)pp, (void *)vp, off);
				}
				off += MMU_PAGESIZE;
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_set_props(pp, P_REF);
				page_io_lock(pp);
				pp = pp->p_next;
			} while (pp != mcpl);
		} else {
			/*
			 * Hypervisor exchange doesn't handle segment or
			 * alignment constraints
			 */
			if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi ||
			    pfnalign)
				goto fail;
			/*
			 * Try exchanging pages with the hypervisor
			 */
			mcpl = page_swap_with_hypervisor(vp, off, vaddr, mattr,
			    flags, minctg);
			if (mcpl == NULL)
				goto fail;
			off += minctg * MMU_PAGESIZE;
		}
		check_dma(mattr, mcpl, minctg);
		/*
		 * Here with a minctg run of contiguous pages, add them to the
		 * list we will return for this request.
		 */
		page_list_concat(&plist, &mcpl);
		npages -= minctg;
		*npagesp = npages;
		sgllen--;
		if (getone)
			break;
	}
	return (plist);
fail:
	return_partial_alloc(plist);
	return (NULL);
}

/*
 * Allocator for domain 0 I/O pages. We match the required
 * DMA attributes and contiguity constraints.
 */
/*ARGSUSED*/
page_t *
page_create_io(
	struct vnode *vp,
	u_offset_t off,
	uint_t bytes,
	uint_t flags,
	struct as *as,
	caddr_t vaddr,
	ddi_dma_attr_t *mattr)
{
	page_t *plist = NULL, *pp;
	int npages = 0, contig, anyaddr, pages_req;
	mfn_t lo_mfn;
	mfn_t hi_mfn;
	pgcnt_t pfnalign = 0;
	int align;
	int is_domu = 0;
	int dummy, bytes_got;
	mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);

	ASSERT(mattr != NULL);
	lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
	hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
	align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
	if (align > MMU_PAGESIZE)
		pfnalign = mmu_btop(align);

	/*
	 * Clear the contig flag if only one page is needed or the scatter
	 * gather list length is >= npages.
	 */
	pages_req = npages = mmu_btopr(bytes);
	contig = (flags & PG_PHYSCONTIG);
	bytes = P2ROUNDUP(bytes, MMU_PAGESIZE);
	if (bytes == MMU_PAGESIZE || mattr->dma_attr_sgllen >= npages)
		contig = 0;

	/*
	 * Check if any old page in the system is fine.
	 * DomU should always go down this path.
	 */
	is_domu = !DOMAIN_IS_INITDOMAIN(xen_info);
	anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
	if ((!contig && anyaddr) || is_domu) {
		flags &= ~PG_PHYSCONTIG;
		plist = page_create_va(vp, off, bytes, flags, &kvseg, vaddr);
		if (plist != NULL)
			return (plist);
		else if (is_domu)
			return (NULL); /* no memory available */
	}
	/*
	 * DomU should never reach here.
	 */
	if (contig) {
		plist = page_get_contigpages(vp, off, &npages, flags, vaddr,
		    mattr);
		if (plist == NULL)
			goto fail;
		bytes_got = (pages_req - npages) << MMU_PAGESHIFT;
		vaddr += bytes_got;
		off += bytes_got;
		/*
		 * We now have all the contiguous pages we need, but
		 * we may still need additional non-contiguous pages.
		 */
	}
	/*
	 * Now loop collecting the requested number of pages.  These do
	 * not have to be contiguous pages, but we will use the contig
	 * page alloc code to get them since it will honor any
	 * other constraints the pages may have.
	 */
	while (npages--) {
		dummy = -1;
		pp = page_get_contigpages(vp, off, &dummy, flags, vaddr, mattr);
		if (pp == NULL)
			goto fail;
		page_add(&plist, pp);
		vaddr += MMU_PAGESIZE;
		off += MMU_PAGESIZE;
	}
	return (plist);
fail:
	/*
	 * Failed to get enough pages; return the ones we did get.
	 */
	return_partial_alloc(plist);
	return (NULL);
}

/*
 * Lock and return the page with the highest mfn that we can find.  last_mfn
 * holds the last one found, so the next search can start from there.  We
 * also keep a counter so that we don't loop forever if the machine has no
 * free pages.
 *
 * This is called from the balloon thread to find pages to give away.  new_high
 * is used when new mfns have been added to the system - we will reset our
 * search if the new mfns are higher than our current search position.
 */
page_t *
page_get_high_mfn(mfn_t new_high)
{
	static mfn_t last_mfn = 0;
	pfn_t pfn;
	page_t *pp;
	ulong_t loop_count = 0;

	if (new_high > last_mfn)
		last_mfn = new_high;

	for (; loop_count < mfn_count; loop_count++, last_mfn--) {
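		/* Wrap the search back to the highest mfn when we hit 0. */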
		if (last_mfn == 0) {
			last_mfn = cached_max_mfn;
		}

		pfn = mfn_to_pfn(last_mfn);
		if (pfn & PFN_IS_FOREIGN_MFN)
			continue;

		/* See if the page is free. If so, lock it. */
		pp = page_numtopp_alloc(pfn);
		if (pp == NULL)
			continue;
		PP_CLRFREE(pp);

		ASSERT(PAGE_EXCL(pp));
		ASSERT(pp->p_vnode == NULL);
		ASSERT(!hat_page_is_mapped(pp));
		last_mfn--;
		return (pp);
	}
	return (NULL);
}

#else /* !__xpv */

/*
 * get a page from any list with the given mnode
 */
static page_t *
page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
    int mnode, int mtype, ddi_dma_attr_t *dma_attr)
{
	kmutex_t *pcm;
	int i;
	page_t *pp;
	page_t *first_pp;
	uint64_t pgaddr;
	ulong_t bin;
	int mtypestart;
	int plw_initialized;
	page_list_walker_t plw;

	VM_STAT_ADD(pga_vmstats.pgma_alloc);

	ASSERT((flags & PG_MATCH_COLOR) == 0);
	ASSERT(szc == 0);
	ASSERT(dma_attr != NULL);

	MTYPE_START(mnode, mtype, flags);
	if (mtype < 0) {
		VM_STAT_ADD(pga_vmstats.pgma_allocempty);
		return (NULL);
	}

	mtypestart = mtype;

	bin = origbin;

	/*
	 * check up to page_colors + 1 bins - origbin may be checked twice
	 * because of BIN_STEP skip
	 */
	do {
		plw_initialized = 0;

		for (plw.plw_count = 0;
		    plw.plw_count < page_colors; plw.plw_count++) {

			if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL)
				goto nextfreebin;

			pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
			mutex_enter(pcm);
			pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
			first_pp = pp;
			while (pp != NULL) {
				if (IS_DUMP_PAGE(pp) || page_trylock(pp,
				    SE_EXCL) == 0) {
					pp = pp->p_next;
					if (pp == first_pp) {
						pp = NULL;
					}
					continue;
				}

				ASSERT(PP_ISFREE(pp));
				ASSERT(PP_ISAGED(pp));
				ASSERT(pp->p_vnode == NULL);
				ASSERT(pp->p_hash == NULL);
				ASSERT(pp->p_offset == (u_offset_t)-1);
				ASSERT(pp->p_szc == szc);
				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
				/* check if page within DMA attributes */
				pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
				if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
				    (pgaddr + MMU_PAGESIZE - 1 <=
				    dma_attr->dma_attr_addr_hi)) {
					break;
				}

				/* continue looking */
				page_unlock(pp);
				pp = pp->p_next;
				if (pp == first_pp)
					pp = NULL;

			}
			if (pp != NULL) {
				ASSERT(mtype == PP_2_MTYPE(pp));
				ASSERT(pp->p_szc == 0);

				/* found a page with specified DMA attributes */
				page_sub(&PAGE_FREELISTS(mnode, szc, bin,
				    mtype), pp);
				page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);

				if ((PP_ISFREE(pp) == 0) ||
				    (PP_ISAGED(pp) == 0)) {
					cmn_err(CE_PANIC, "page %p is not free",
					    (void *)pp);
				}

				mutex_exit(pcm);
				check_dma(dma_attr, pp, 1);
				VM_STAT_ADD(pga_vmstats.pgma_allocok);
				return (pp);
			}
			mutex_exit(pcm);
nextfreebin:
			if (plw_initialized == 0) {
				page_list_walk_init(szc, 0, bin, 1, 0, &plw);
				ASSERT(plw.plw_ceq_dif == page_colors);
				plw_initialized = 1;
			}

			if (plw.plw_do_split) {
				pp = page_freelist_split(szc, bin, mnode,
				    mtype,
				    mmu_btop(dma_attr->dma_attr_addr_lo),
				    mmu_btop(dma_attr->dma_attr_addr_hi + 1),
				    &plw);
				if (pp != NULL) {
					check_dma(dma_attr, pp, 1);
					return (pp);
				}
			}

			bin = page_list_walk_next_bin(szc, bin, &plw);
		}

		MTYPE_NEXT(mnode, mtype, flags);
	} while (mtype >= 0);

	/* failed to find a page in the freelist; try it in the cachelist */

	/* reset mtype start for cachelist search */
	mtype = mtypestart;
	ASSERT(mtype >= 0);

	/* start with the bin of matching color */
	bin = origbin;

	do {
		for (i = 0; i <= page_colors; i++) {
			if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
				goto nextcachebin;
			pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
			mutex_enter(pcm);
			pp = PAGE_CACHELISTS(mnode, bin, mtype);
			first_pp = pp;
			while (pp != NULL) {
				if (IS_DUMP_PAGE(pp) || page_trylock(pp,
				    SE_EXCL) == 0) {
					pp = pp->p_next;
					if (pp == first_pp)
						pp = NULL;
					continue;
				}
				ASSERT(pp->p_vnode);
				ASSERT(PP_ISAGED(pp) == 0);
				ASSERT(pp->p_szc == 0);
				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);

				/* check if page within DMA attributes */

				pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
				if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
				    (pgaddr + MMU_PAGESIZE - 1 <=
				    dma_attr->dma_attr_addr_hi)) {
					break;
				}

				/* continue looking */
				page_unlock(pp);
				pp = pp->p_next;
				if (pp == first_pp)
					pp = NULL;
			}

			if (pp != NULL) {
				ASSERT(mtype == PP_2_MTYPE(pp));
				ASSERT(pp->p_szc == 0);

				/* found a page with specified DMA attributes */
				page_sub(&PAGE_CACHELISTS(mnode, bin,
				    mtype), pp);
				page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);

				mutex_exit(pcm);
				ASSERT(pp->p_vnode);
				ASSERT(PP_ISAGED(pp) == 0);
				check_dma(dma_attr, pp, 1);
				VM_STAT_ADD(pga_vmstats.pgma_allocok);
				return (pp);
			}
			mutex_exit(pcm);
nextcachebin:
			bin += (i == 0) ? BIN_STEP : 1;
			bin &= page_colors_mask;
		}
		MTYPE_NEXT(mnode, mtype, flags);
	} while (mtype >= 0);

	VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
	return (NULL);
}

/*
 * This function is similar to page_get_freelist()/page_get_cachelist()
 * but it searches both the lists to find a page with the specified
 * color (or no color) and DMA attributes. The search is done in the
 * freelist first and then in the cache list within the highest memory
 * range (based on DMA attributes) before searching in the lower
 * memory ranges.
 *
 * Note: This function is called only by page_create_io().
 */
/*ARGSUSED*/
static page_t *
page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
    size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
{
	uint_t bin;
	int mtype;
	page_t *pp;
	int n;
	int m;
	int szc;
	int fullrange;
	int mnode;
	int local_failed_stat = 0;
	lgrp_mnode_cookie_t lgrp_cookie;

	VM_STAT_ADD(pga_vmstats.pga_alloc);

	/* only base pagesize currently supported */
	if (size != MMU_PAGESIZE)
		return (NULL);

	/*
	 * If we're passed a specific lgroup, we use it.  Otherwise,
	 * assume first-touch placement is desired.
	 */
	if (!LGRP_EXISTS(lgrp))
		lgrp = lgrp_home_lgrp();

	/* LINTED */
	AS_2_BIN(as, seg, vp, vaddr, bin, 0);

	/*
	 * Only hold one freelist or cachelist lock at a time, that way we
	 * can start anywhere and not have to worry about lock
	 * ordering.
	 */
	if (dma_attr == NULL) {
		n = mtype16m;
		m = mtypetop;
		fullrange = 1;
		VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
	} else {
		pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
		pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);

		/*
		 * We can guarantee alignment only for page boundary.
		 */
		if (dma_attr->dma_attr_align > MMU_PAGESIZE)
			return (NULL);

		/* Sanity check the dma_attr */
		if (pfnlo > pfnhi)
			return (NULL);

		n = pfn_2_mtype(pfnlo);
		m = pfn_2_mtype(pfnhi);

		fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
		    (pfnhi >= mnoderanges[m].mnr_pfnhi));
	}
	VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);

	szc = 0;

	/* cycling through mtypes is handled by RANGE0 if n == mtype16m */
	if (n == mtype16m) {
		flags |= PGI_MT_RANGE0;
		n = m;
	}
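	/*
	 * Walk the memory types from the top (m) down to the lowest
	 * allowed (n), so high memory is consumed before the scarce
	 * low-memory ranges.
	 */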

	/*
	 * Try local memory node first, but try remote if we can't
	 * get a page of the right color.
	 */
	LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
		/*
		 * allocate pages from high pfn to low.
		 */
		mtype = m;
		do {
			if (fullrange != 0) {
				pp = page_get_mnode_freelist(mnode,
				    bin, mtype, szc, flags);
				if (pp == NULL) {
					pp = page_get_mnode_cachelist(
					    bin, flags, mnode, mtype);
				}
			} else {
				pp = page_get_mnode_anylist(bin, szc,
				    flags, mnode, mtype, dma_attr);
			}
			if (pp != NULL) {
				VM_STAT_ADD(pga_vmstats.pga_allocok);
				check_dma(dma_attr, pp, 1);
				return (pp);
			}
		} while (mtype != n &&
		    (mtype = mnoderanges[mtype].mnr_next) != -1);
		if (!local_failed_stat) {
			lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
			local_failed_stat = 1;
		}
	}
	VM_STAT_ADD(pga_vmstats.pga_allocfailed);

	return (NULL);
}

/*
 * page_create_io()
 *
 * This function is a copy of page_create_va() with an additional
 * argument 'mattr' that specifies DMA memory requirements to
 * the page list functions. This function is used by the segkmem
 * allocator so it is only to create new pages (i.e., PG_EXCL is
 * set).
 *
 * Note: This interface is currently used by x86 PSM only and is
 * not fully specified so the commitment level is only for
 * private interface specific to x86. This interface uses PSM
 * specific page_get_anylist() interface.
 */

#define PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}


page_t *
page_create_io(
	struct vnode *vp,
	u_offset_t off,
	uint_t bytes,
	uint_t flags,
	struct as *as,
	caddr_t vaddr,
	ddi_dma_attr_t *mattr)	/* DMA memory attributes if any */
{
	page_t *plist = NULL;
	uint_t plist_len = 0;
	pgcnt_t npages;
	page_t *npp = NULL;
	uint_t pages_req;
	page_t *pp;
	kmutex_t *phm = NULL;
	uint_t index;

	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
	    "page_create_start:vp %p off %llx bytes %u flags %x",
	    vp, off, bytes, flags);

	ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);

	pages_req = npages = mmu_btopr(bytes);

	/*
	 * Do the freemem and pcf accounting.
	 */
	if (!page_create_wait(npages, flags)) {
		return (NULL);
	}

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
	    "page_create_success:vp %p off %llx", vp, off);

	/*
	 * If satisfying this request has left us with too little
	 * memory, start the wheels turning to get some back. The
	 * first clause of the test prevents waking up the pageout
	 * daemon in situations where it would decide that there's
	 * nothing to do.
	 */
	if (nscan < desscan && freemem < minfree) {
		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
		    "pageout_cv_signal:freemem %ld", freemem);
		cv_signal(&proc_pageout->p_cv);
	}

	if (flags & PG_PHYSCONTIG) {

		plist = page_get_contigpage(&npages, mattr, 1);
		if (plist == NULL) {
			page_create_putback(npages);
			return (NULL);
		}

		pp = plist;

		do {
			if (!page_hashin(pp, vp, off, NULL)) {
				panic("pg_creat_io: hashin failed %p %p %llx",
				    (void *)pp, (void *)vp, off);
			}
			VM_STAT_ADD(page_create_new);
			off += MMU_PAGESIZE;
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			page_set_props(pp, P_REF);
			pp = pp->p_next;
		} while (pp != plist);

		if (!npages) {
			check_dma(mattr, plist, pages_req);
			return (plist);
		} else {
			vaddr += (pages_req - npages) << MMU_PAGESHIFT;
		}

		/*
		 * fall-thru:
		 *
		 * page_get_contigpage returns when npages <= sgllen.
		 * Grab the rest of the non-contig pages below from anylist.
		 */
	}

	/*
	 * Loop around collecting the requested number of pages.
	 * Most of the time, we have to `create' a new page. With
	 * this in mind, pull the page off the free list before
	 * getting the hash lock. This will minimize the hash
	 * lock hold time, nesting, and the like. If it turns
	 * out we don't need the page, we put it back at the end.
	 */
	while (npages--) {
		phm = NULL;

		index = PAGE_HASH_FUNC(vp, off);
top:
		ASSERT(phm == NULL);
		ASSERT(index == PAGE_HASH_FUNC(vp, off));
		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));

		if (npp == NULL) {
			/*
			 * Try to get the page of any color either from
			 * the freelist or from the cache list.
			 */
			npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
			    flags & ~PG_MATCH_COLOR, mattr, NULL);
			if (npp == NULL) {
				if (mattr == NULL) {
					/*
					 * Not looking for a special page;
					 * panic!
					 */
					panic("no page found %d", (int)npages);
				}
				/*
				 * No page found! This can happen
				 * if we are looking for a page
				 * within a specific memory range
				 * for DMA purposes. If PG_WAIT is
				 * specified then we wait for a
				 * while and then try again. The
				 * wait could be forever if we
				 * don't get the page(s) we need.
				 *
				 * Note: XXX We really need a mechanism
				 * to wait for pages in the desired
				 * range. For now, we wait for any
				 * pages and see if we can use it.
				 */

				if ((mattr != NULL) && (flags & PG_WAIT)) {
					delay(10);
					goto top;
				}
				goto fail; /* undo accounting stuff */
			}

			if (PP_ISAGED(npp) == 0) {
				/*
				 * Since this page came from the
				 * cachelist, we must destroy the
				 * old vnode association.
				 */
				page_hashout(npp, (kmutex_t *)NULL);
			}
		}

		/*
		 * We own this page!
		 */
		ASSERT(PAGE_EXCL(npp));
		ASSERT(npp->p_vnode == NULL);
		ASSERT(!hat_page_is_mapped(npp));
		PP_CLRFREE(npp);
		PP_CLRAGED(npp);

		/*
		 * Here we have a page in our hot little mitts and are
		 * just waiting to stuff it on the appropriate lists.
		 * Get the mutex and check to see if it really does
		 * not exist.
		 */
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		PAGE_HASH_SEARCH(index, pp, vp, off);
		if (pp == NULL) {
			VM_STAT_ADD(page_create_new);
			pp = npp;
			npp = NULL;
			if (!page_hashin(pp, vp, off, phm)) {
				/*
				 * Since we hold the page hash mutex and
				 * just searched for this page, page_hashin
				 * had better not fail. If it does, that
				 * means some thread did not follow the
				 * page hash mutex rules. Panic now and
				 * get it over with. As usual, go down
				 * holding all the locks.
				 */
				ASSERT(MUTEX_HELD(phm));
				panic("page_create: hashin fail %p %p %llx %p",
				    (void *)pp, (void *)vp, off, (void *)phm);

			}
			ASSERT(MUTEX_HELD(phm));
			mutex_exit(phm);
			phm = NULL;

			/*
			 * Hat layer locking need not be done to set
			 * the following bits since the page is not hashed
			 * and was on the free list (i.e., had no mappings).
			 *
			 * Set the reference bit to protect
			 * against immediate pageout
			 *
			 * XXXmh modify freelist code to set reference
			 * bit so we don't have to do it here.
			 */
			page_set_props(pp, P_REF);
		} else {
			ASSERT(MUTEX_HELD(phm));
			mutex_exit(phm);
			phm = NULL;
			/*
			 * NOTE: This should not happen for pages associated
			 * with kernel vnode 'kvp'.
			 */
			/* XX64 - to debug why this happens! */
			ASSERT(!VN_ISKAS(vp));
			if (VN_ISKAS(vp))
				cmn_err(CE_NOTE,
				    "page_create: page not expected "
				    "in hash list for kernel vnode - pp 0x%p",
				    (void *)pp);
			VM_STAT_ADD(page_create_exists);
			goto fail;
		}

		/*
		 * Got a page! It is locked. Acquire the i/o
		 * lock since we are going to use the p_next and
		 * p_prev fields to link the requested pages together.
		 */
		page_io_lock(pp);
		page_add(&plist, pp);
		plist = plist->p_next;
		off += MMU_PAGESIZE;
		vaddr += MMU_PAGESIZE;
	}

	check_dma(mattr, plist, pages_req);
	return (plist);

fail:
	if (npp != NULL) {
		/*
		 * Did not need this page after all.
		 * Put it back on the free list.
		 */
		VM_STAT_ADD(page_create_putbacks);
		PP_SETFREE(npp);
		PP_SETAGED(npp);
		npp->p_offset = (u_offset_t)-1;
		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
		page_unlock(npp);
	}

	/*
	 * Give up the pages we already got.
	 */
	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		plist_len++;
		/*LINTED: constant in conditional ctx*/
		VN_DISPOSE(pp, B_INVAL, 0, kcred);
	}

	/*
	 * VN_DISPOSE does freemem accounting for the pages in plist
	 * by calling page_free. So, we need to undo the pcf accounting
	 * for only the remaining pages.
	 */
	VM_STAT_ADD(page_create_putbacks);
	page_create_putback(pages_req - plist_len);

	return (NULL);
}
#endif /* !__xpv */


/*
 * Copy the data from the physical page represented by "frompp" to
 * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
 * CPU->cpu_caddr2. It assumes that no one uses either map at interrupt
 * level and no one sleeps with an active mapping there.
 *
 * Note that the ref/mod bits in the page_t's are not affected by
 * this operation, hence it is up to the caller to update them appropriately.
 */
int
ppcopy(page_t *frompp, page_t *topp)
{
	caddr_t pp_addr1;
	caddr_t pp_addr2;
	hat_mempte_t pte1;
	hat_mempte_t pte2;
	kmutex_t *ppaddr_mutex;
	label_t ljb;
	int ret = 1;

	ASSERT_STACK_ALIGNED();
	ASSERT(PAGE_LOCKED(frompp));
	ASSERT(PAGE_LOCKED(topp));

	if (kpm_enable) {
		pp_addr1 = hat_kpm_page2va(frompp, 0);
		pp_addr2 = hat_kpm_page2va(topp, 0);
		kpreempt_disable();
	} else {
		/*
		 * disable preemption so that the CPU can't change under us
		 */
		kpreempt_disable();

		pp_addr1 = CPU->cpu_caddr1;
		pp_addr2 = CPU->cpu_caddr2;
		pte1 = CPU->cpu_caddr1pte;
		pte2 = CPU->cpu_caddr2pte;

		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
		mutex_enter(ppaddr_mutex);

		hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
		    PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
		hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
		    HAT_LOAD_NOCONSIST);
	}

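	/*
	 * Run the copy under an on_fault() handler so that a fault
	 * during the copy returns 0 instead of panicking.
	 */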
	if (on_fault(&ljb)) {
		ret = 0;
		goto faulted;
	}
	if (use_sse_pagecopy)
#ifdef __xpv
		page_copy_no_xmm(pp_addr2, pp_addr1);
#else
		hwblkpagecopy(pp_addr1, pp_addr2);
#endif
	else
		bcopy(pp_addr1, pp_addr2, PAGESIZE);

	no_fault();
faulted:
	if (!kpm_enable) {
#ifdef __xpv
		/*
		 * We can't leave unused mappings laying about under the
		 * hypervisor, so blow them away.
		 */
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
		    UVMF_INVLPG | UVMF_LOCAL) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
		    UVMF_INVLPG | UVMF_LOCAL) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
#endif
		mutex_exit(ppaddr_mutex);
	}
	kpreempt_enable();
	return (ret);
}

void
pagezero(page_t *pp, uint_t off, uint_t len)
{
	ASSERT(PAGE_LOCKED(pp));
	pfnzero(page_pptonum(pp), off, len);
}

/*
 * Zero the physical page from off to off + len given by pfn
 * without changing the reference and modified bits of the page.
 *
 * We do this using CPU private page address #2; see ppcopy() for more info.
 * pfnzero() must not be called at interrupt level.
 */
void
pfnzero(pfn_t pfn, uint_t off, uint_t len)
{
	caddr_t pp_addr2;
	hat_mempte_t pte2;
	kmutex_t *ppaddr_mutex = NULL;

	ASSERT_STACK_ALIGNED();
	ASSERT(len <= MMU_PAGESIZE);
	ASSERT(off <= MMU_PAGESIZE);
	ASSERT(off + len <= MMU_PAGESIZE);

	if (kpm_enable && !pfn_is_foreign(pfn)) {
		pp_addr2 = hat_kpm_pfn2va(pfn);
		kpreempt_disable();
	} else {
		kpreempt_disable();

		pp_addr2 = CPU->cpu_caddr2;
		pte2 = CPU->cpu_caddr2pte;

		ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
		mutex_enter(ppaddr_mutex);

		hat_mempte_remap(pfn, pp_addr2, pte2,
		    PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
		    HAT_LOAD_NOCONSIST);
	}

	if (use_sse_pagezero) {
#ifdef __xpv
		uint_t rem;

		/*
		 * zero a byte at a time until properly aligned for
		 * block_zero_no_xmm().
		 */
		while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
			pp_addr2[off++] = 0;

		/*
		 * Now use faster block_zero_no_xmm() for any range
		 * that is properly aligned and sized.
		 */
		rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
		len -= rem;
		if (len != 0) {
			block_zero_no_xmm(pp_addr2 + off, len);
			off += len;
		}

		/*
		 * zero remainder with byte stores.
		 */
		while (rem-- > 0)
			pp_addr2[off++] = 0;
#else
		hwblkclr(pp_addr2 + off, len);
#endif
	} else {
		bzero(pp_addr2 + off, len);
	}

	if (!kpm_enable || pfn_is_foreign(pfn)) {
#ifdef __xpv
		/*
		 * On the hypervisor this page might get used for a page
		 * table before any intervening change to this mapping,
		 * so blow it away.
		 */
		if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
		    UVMF_INVLPG) < 0)
			panic("HYPERVISOR_update_va_mapping() failed");
#endif
		mutex_exit(ppaddr_mutex);
	}

	kpreempt_enable();
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * For now, we rely on the fact that pagezero() will
	 * always clear UEs.
	 */
	pagezero(pp, off, len);
}

/*
 * Set up two private addresses on a given CPU for use by ppcopy().
 */
void
setup_vaddr_for_ppcopy(struct cpu *cpup)
{
	void *addr;
	hat_mempte_t pte_pa;

	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
	pte_pa = hat_mempte_setup(addr);
	cpup->cpu_caddr1 = addr;
	cpup->cpu_caddr1pte = pte_pa;

	addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
	pte_pa = hat_mempte_setup(addr);
	cpup->cpu_caddr2 = addr;
	cpup->cpu_caddr2pte = pte_pa;

	mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Undo setup_vaddr_for_ppcopy.
 */
void
teardown_vaddr_for_ppcopy(struct cpu *cpup)
{
	mutex_destroy(&cpup->cpu_ppaddr_mutex);

	hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
	cpup->cpu_caddr2pte = 0;
	vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
	cpup->cpu_caddr2 = 0;

	hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
	cpup->cpu_caddr1pte = 0;
	vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
	cpup->cpu_caddr1 = 0;
}

/*
 * Function for flushing D-cache when performing module relocations
 * to an alternate mapping. Unnecessary on Intel / AMD platforms.
 */
void
dcache_flushall()
{}

size_t
exec_get_spslew(void)
{
	return (0);
}

/*
 * Allocate a memory page. The argument 'seed' can be any pseudo-random
 * number to vary where the pages come from. This is quite a hacked up
 * method -- it works for now, but really needs to be fixed up a bit.
 *
 * We currently use page_create_va() on the kvp with fake offsets,
 * segments and virt address. This is pretty bogus, but was copied from the
 * old hat_i86.c code. A better approach would be to specify either mnode
 * random or mnode local and take a page from whatever color has the MOST
 * available - this would have a minimal impact on page coloring.
 */
page_t *
page_get_physical(uintptr_t seed)
{
	page_t *pp;
	u_offset_t offset;
	static struct seg tmpseg;
	static uintptr_t ctr = 0;

	/*
	 * This code is gross, we really need a simpler page allocator.
	 *
	 * We need to assign an offset for the page to call page_create_va().
	 * To avoid conflicts with other pages, we get creative with the
	 * offset.
	 * For 32 bits, we need an offset > 4Gig.
	 * For 64 bits, we need an offset somewhere in the VA hole.
	 */
	offset = seed;
	if (offset > kernelbase)
		offset -= kernelbase;
	offset <<= MMU_PAGESHIFT;
#if defined(__amd64)
	offset += mmu.hole_start;	/* something in VA hole */
#else
	offset += 1ULL << 40;	/* something > 4 Gig */
#endif

	if (page_resv(1, KM_NOSLEEP) == 0)
		return (NULL);

#ifdef DEBUG
	pp = page_exists(&kvp, offset);
	if (pp != NULL)
		panic("page already exists %p", (void *)pp);
#endif

	pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
	    &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE));	/* changing VA usage */
	if (pp != NULL) {
		page_io_unlock(pp);
		page_downgrade(pp);
	}
	return (pp);
}
