/*-
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \        /
 *        |                            [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *        |                              [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Likewise,
 * for any deallocation through uma_zfree() the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN, &maxmbufmem, 0,
    "Maximum real memory allocatable to various mbuf types");
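/*
 * Illustrative example (not compiled as part of this file): the per-type
 * limits above are exported further below as read/write sysctls whose
 * handlers only allow a limit to grow, e.g. from userland:
 *
 *	sysctl kern.ipc.nmbclusters		# query the current limit
 *	sysctl kern.ipc.nmbclusters=262144	# raise it
 *
 * maxmbufmem itself is a boot-time loader tunable (CTLFLAG_RDTUN) and
 * cannot be changed at runtime.
 */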
/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
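	/*
	 * Illustrative arithmetic (assumed example values, not from this
	 * file): with maxmbufmem at 1 GB, MCLBYTES at 2k and MSIZE at 256,
	 * the defaults above come to roughly 131072 2k clusters
	 * (1G / 2k / 4), and the floor below comes to roughly 838860 mbufs
	 * (1G / 256 / 5), well above the sum of all cluster limits.
	 */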
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zones too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
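/*
 * Illustrative example (not compiled as part of this file): a driver that
 * wants large receive buffers allocates an mbuf with a jumbo cluster
 * attached through the zones created above, e.g.
 *
 *	struct mbuf *m;
 *
 *	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */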
/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;

	error = m_init(m, NULL, size, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}
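/*
 * Illustrative example (not compiled as part of this file): a plain mbuf
 * allocation such as
 *
 *	struct mbuf *m = m_gethdr(M_WAITOK, MT_DATA);
 *
 * is served from zone_mbuf and therefore runs mb_ctor_mbuf() above, while
 * m_free()/m_freem() runs mb_dtor_mbuf() on the way back to the zone.
 */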
/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}
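/*
 * Illustrative example (not compiled as part of this file): mb_ctor_clust()
 * below runs both for bare cluster allocations ('arg' == NULL) and when a
 * cluster is attached to an existing mbuf, e.g.
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_NOWAIT, MT_DATA);
 *	if (m != NULL) {
 *		MCLGET(m, M_NOWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_free(m);
 *			m = NULL;
 *		}
 *	}
 */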
/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}
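/*
 * Note: the Packet secondary zone below caches fully constructed
 * mbuf + cluster pairs, so the common m_getcl()/m_freem() cycle does not
 * pay for attaching and detaching a cluster each time.  mb_zinit_pack()
 * and mb_zfini_pack() only run on the slower transitions between the Mbuf
 * keg and the Packet zone cache.
 */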
/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, NULL, size, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.header = NULL;
	m->m_pkthdr.len = 0;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.fibnum = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;
	m->m_pkthdr.tso_segsz = 0;
	m->m_pkthdr.ether_vtag = 0;
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}