--- vm_init.c (2a22df74e9fceaffd62ee437de08383d6cf8cfe0)
+++ vm_init.c (46b0292a826b6042291e3191c7150d197162ef85)
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.

--- 207 unchanged lines hidden ---

 	/*
 	 * End of second pass, addresses have been assigned
 	 */
 	if ((vm_size_t)((char *)v - firstaddr) != size)
 		panic("startup: table size inconsistency");

 	/*
-	 * Allocate the clean map to hold all of the paging and I/O virtual
-	 * memory.
+	 * Allocate the clean map to hold all of I/O virtual memory.
 	 */
-	size = (long)nbuf * BKVASIZE + (long)nswbuf * MAXPHYS +
-	    (long)bio_transient_maxcnt * MAXPHYS;
+	size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * MAXPHYS;
 	kmi->clean_sva = firstaddr = kva_alloc(size);
 	kmi->clean_eva = firstaddr + size;

 	/*
 	 * Allocate the buffer arena.
 	 *
 	 * Enable the quantum cache if we have more than 4 cpus.  This
 	 * avoids lock contention at the expense of some fragmentation.
 	 */
 	size = (long)nbuf * BKVASIZE;
 	kmi->buffer_sva = firstaddr;
 	kmi->buffer_eva = kmi->buffer_sva + size;
 	vmem_init(buffer_arena, "buffer arena", kmi->buffer_sva, size,
 	    PAGE_SIZE, (mp_ncpus > 4) ? BKVASIZE * 8 : 0, 0);
 	firstaddr += size;

 	/*
-	 * Now swap kva.
-	 */
-	swapbkva = firstaddr;
-	size = (long)nswbuf * MAXPHYS;
-	firstaddr += size;
-
-	/*
 	 * And optionally transient bio space.
 	 */
 	if (bio_transient_maxcnt != 0) {
 		size = (long)bio_transient_maxcnt * MAXPHYS;
 		vmem_init(transient_arena, "transient arena",
 		    firstaddr, size, PAGE_SIZE, 0, 0);
 		firstaddr += size;
 	}

--- 20 unchanged lines hidden ---
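The net effect of the hunks above: the newer revision stops reserving a swapbkva range of nswbuf * MAXPHYS bytes inside the clean submap, so the submap now covers only buffer-cache KVA and the optional transient-bio arena; the pbuf KVA is presumably obtained elsewhere after this change. Below is a minimal standalone sketch of the size arithmetic, not kernel code: the constants mirror common defaults (BKVASIZE = 16 KiB, MAXPHYS = 128 KiB at the time), and the three tuning counts are made-up values, since the real nbuf, nswbuf, and bio_transient_maxcnt are derived from machine resources at boot.

/*
 * Illustrative userland sketch of the clean-submap sizing before and
 * after the change.  All tuning values below are hypothetical.
 */
#include <stdio.h>

#define BKVASIZE	(16 * 1024)	/* common default, sys/param.h */
#define MAXPHYS		(128 * 1024)	/* common default at the time */

int
main(void)
{
	long nbuf = 4096;		/* hypothetical buffer count */
	long nswbuf = 256;		/* hypothetical pbuf count */
	long bio_transient_maxcnt = 1024; /* hypothetical transient bios */
	long old_size, new_size;

	/* Old revision: buffers + swap pbuf KVA + transient bio space. */
	old_size = nbuf * BKVASIZE + nswbuf * MAXPHYS +
	    bio_transient_maxcnt * MAXPHYS;

	/* New revision: the nswbuf * MAXPHYS reservation is dropped. */
	new_size = nbuf * BKVASIZE + bio_transient_maxcnt * MAXPHYS;

	printf("old clean submap: %ld bytes\n", old_size);
	printf("new clean submap: %ld bytes\n", new_size);
	printf("KVA no longer reserved up front: %ld bytes\n",
	    old_size - new_size);
	return (0);
}

The quantum-cache argument to vmem_init() in the unchanged buffer-arena block is a separate design point the diff leaves alone: with more than four CPUs, per-CPU caching of BKVASIZE-sized chunks reduces arena lock contention at the cost of some KVA fragmentation, as the in-tree comment notes.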