/*-
 * Copyright (c) 2004, 2005
 *	Bosko Milekic <bmilekic@FreeBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                            [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                        |
 *        |                            [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 */

int nmbclusters;
struct mbstat mbstat;

static void
tunable_mbinit(void *dummy)
{

        /* This has to be done before VM init. */
        nmbclusters = 1024 + maxusers * 64;
        TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbclusters, CTLFLAG_RW, &nmbclusters, 0,
    "Maximum number of mbuf clusters allowed");
SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");

/*
 * Zones from which we allocate.
 */
uma_zone_t      zone_mbuf;
uma_zone_t      zone_clust;
uma_zone_t      zone_pack;
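
/*
 * Illustrative sketch, not part of this file's original code: how a
 * consumer typically reaches the zones pictured above through the
 * standard mbuf allocation interfaces.  m_get()/m_gethdr() draw from
 * the Mbuf Master Zone, m_getcl() draws a combined mbuf + cluster from
 * the Packet (secondary) Zone, and m_clget() attaches a cluster from
 * the Cluster Zone to an existing mbuf.  The function name below is
 * hypothetical; the block is kept under #if 0 so it is never compiled.
 */
#if 0
static struct mbuf *
example_alloc_paths(void)
{
        struct mbuf *m, *n;

        /* Packet header mbuf only, from the Mbuf Master Zone. */
        m = m_gethdr(M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);

        /* Mbuf and cluster together, from the Packet secondary Zone. */
        n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (n == NULL) {
                m_freem(m);
                return (NULL);
        }

        /* Attach a cluster from the Cluster Zone to the first mbuf. */
        m_clget(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                m_freem(n);
                return (NULL);
        }

        /* Chain the two mbufs and hand them back. */
        m->m_next = n;
        return (m);
}
#endif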

/*
 * Local prototypes.
 */
static int      mb_ctor_mbuf(void *, int, void *, int);
static int      mb_ctor_clust(void *, int, void *, int);
static int      mb_ctor_pack(void *, int, void *, int);
static void     mb_dtor_mbuf(void *, int, void *);
static void     mb_dtor_clust(void *, int, void *);    /* XXX */
static void     mb_dtor_pack(void *, int, void *);     /* XXX */
static int      mb_init_pack(void *, int, int);
static void     mb_fini_pack(void *, int);

static void     mb_reclaim(void *);
static void     mbuf_init(void *);

/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL)
static void
mbuf_init(void *dummy)
{

        /*
         * Configure UMA zones for Mbufs, Clusters, and Packets.
         */
        zone_mbuf = uma_zcreate("Mbuf", MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
            NULL, NULL, MSIZE - 1, UMA_ZONE_MAXBUCKET);
        zone_clust = uma_zcreate("MbufClust", MCLBYTES, mb_ctor_clust,
            mb_dtor_clust, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbclusters > 0)
                uma_zone_set_max(zone_clust, nmbclusters);
        zone_pack = uma_zsecond_create("Packet", mb_ctor_pack, mb_dtor_pack,
            mb_init_pack, mb_fini_pack, zone_mbuf);

        /* uma_prealloc() goes here */

        /*
         * Hook event handler for low-memory situation, used to
         * drain protocols and push data back to the caches (UMA
         * later pushes it back to VM).
         */
        EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
            EVENTHANDLER_PRI_FIRST);

        /*
         * [Re]set counters and local statistics knobs.
         * XXX Some of these should go and be replaced, but UMA stat
         * gathering needs to be revised.
         */
        mbstat.m_mbufs = 0;
        mbstat.m_mclusts = 0;
        mbstat.m_drain = 0;
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;
        mbstat.m_numtypes = MT_NTYPES;

        mbstat.m_mcfail = mbstat.m_mpfail = 0;
        mbstat.sf_iocnt = 0;
        mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * Constructor for the Mbuf master zone.
 *
 * The 'arg' pointer points to an mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        struct mb_args *args;
#ifdef MAC
        int error;
#endif
        int flags;
        short type;

        m = (struct mbuf *)mem;
        args = (struct mb_args *)arg;
        flags = args->flags;
        type = args->type;

        m->m_type = type;
        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_flags = flags;
        if (flags & M_PKTHDR) {
                m->m_data = m->m_pktdat;
                m->m_pkthdr.rcvif = NULL;
                m->m_pkthdr.csum_flags = 0;
                SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
                /* If the label init fails, fail the alloc */
                error = mac_init_mbuf(m, how);
                if (error)
                        return (error);
#endif
        } else
                m->m_data = m->m_dat;
        mbstat.m_mbufs += 1;    /* XXX */
        return (0);
}
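
/*
 * Illustrative sketch, not part of this file's original code: how
 * mb_ctor_mbuf() gets its 'arg'.  The allocation wrappers in
 * <sys/mbuf.h> roughly do the following: fill in a struct mb_args with
 * the caller's flags and type and hand it to uma_zalloc_arg(), which
 * passes it through to the constructor above.  The function name below
 * is hypothetical; the block is kept under #if 0 so it is never
 * compiled.
 */
#if 0
static struct mbuf *
example_zalloc_mbuf(int how, short type, int flags)
{
        struct mb_args args;

        args.flags = flags;     /* e.g. M_PKTHDR */
        args.type = type;       /* e.g. MT_DATA */

        /* UMA invokes mb_ctor_mbuf(mem, MSIZE, &args, how). */
        return ((struct mbuf *)uma_zalloc_arg(zone_mbuf, &args, how));
}
#endif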

/*
 * The Mbuf master zone and Packet secondary zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        if ((m->m_flags & M_PKTHDR) != 0)
                m_tag_delete_chain(m, NULL);
        mbstat.m_mbufs -= 1;    /* XXX */
}

/* XXX Only because of stats */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        if ((m->m_flags & M_PKTHDR) != 0)
                m_tag_delete_chain(m, NULL);
        mbstat.m_mbufs -= 1;    /* XXX */
        mbstat.m_mclusts -= 1;  /* XXX */
}

/*
 * The Cluster zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;

        m = (struct mbuf *)arg;
        m->m_ext.ext_buf = (caddr_t)mem;
        m->m_data = m->m_ext.ext_buf;
        m->m_flags |= M_EXT;
        m->m_ext.ext_free = NULL;
        m->m_ext.ext_args = NULL;
        m->m_ext.ext_size = MCLBYTES;
        m->m_ext.ext_type = EXT_CLUSTER;
        m->m_ext.ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
            m->m_ext.ext_buf);
        *(m->m_ext.ref_cnt) = 1;
        mbstat.m_mclusts += 1;  /* XXX */
        return (0);
}

/* XXX */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
        mbstat.m_mclusts -= 1;  /* XXX */
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from keg slab to zone cache.
 */
static int
mb_init_pack(void *mem, int size, int how)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        m->m_ext.ext_buf = NULL;
        uma_zalloc_arg(zone_clust, m, how);
        if (m->m_ext.ext_buf == NULL)
                return (ENOMEM);
        mbstat.m_mclusts -= 1;  /* XXX */
        return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_fini_pack(void *mem, int size)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
        m->m_ext.ext_buf = NULL;
        mbstat.m_mclusts += 1;  /* XXX */
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        struct mb_args *args;
#ifdef MAC
        int error;
#endif
        int flags;
        short type;

        m = (struct mbuf *)mem;
        args = (struct mb_args *)arg;
        flags = args->flags;
        type = args->type;

        m->m_type = type;
        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_data = m->m_ext.ext_buf;
        m->m_flags = flags|M_EXT;
        m->m_ext.ext_free = NULL;
        m->m_ext.ext_args = NULL;
        m->m_ext.ext_size = MCLBYTES;
        m->m_ext.ext_type = EXT_PACKET;
        *(m->m_ext.ref_cnt) = 1;

        if (flags & M_PKTHDR) {
                m->m_pkthdr.rcvif = NULL;
                m->m_pkthdr.csum_flags = 0;
                SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
                /* If the label init fails, fail the alloc */
                error = mac_init_mbuf(m, how);
                if (error)
                        return (error);
#endif
        }
        mbstat.m_mbufs += 1;    /* XXX */
        mbstat.m_mclusts += 1;  /* XXX */
        return (0);
}
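
/*
 * Illustrative sketch, not part of this file's original code: the
 * m_getcl()-style path into the Packet secondary zone.  mb_init_pack()
 * attaches the cluster once, when an object moves from the mbuf keg
 * into the zone's cache; mb_ctor_pack() above then only has to reset
 * per-allocation fields on every cache hit.  The function name below
 * is hypothetical; the block is kept under #if 0 so it is never
 * compiled.
 */
#if 0
static struct mbuf *
example_zalloc_packet(int how, short type, int flags)
{
        struct mb_args args;

        args.flags = flags;
        args.type = type;

        /*
         * On a cache hit this costs one constructor call; on a miss,
         * UMA first runs mb_init_pack() to glue on a cluster.
         */
        return ((struct mbuf *)uma_zalloc_arg(zone_pack, &args, how));
}
#endif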

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines
 * currently have to acquire some locks, which raises the possibility
 * of lock order reversal.
 */
static void
mb_reclaim(void *junk)
{
        struct domain *dp;
        struct protosw *pr;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
            "mb_reclaim()");

        mbstat.m_drain++;
        for (dp = domains; dp != NULL; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain != NULL)
                                (*pr->pr_drain)();
}
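
/*
 * Illustrative sketch, not part of this file's original code: how a
 * protocol participates in the vm_lowmem-driven drain pass above.
 * mb_reclaim() walks each domain's protosw array and calls pr_drain
 * wherever it is non-NULL, so a protocol only needs to point that
 * member at its drain routine (a real entry would of course fill in
 * the rest of the protosw and be attached to a domain).  The names
 * below are hypothetical; the block is kept under #if 0 so it is
 * never compiled.
 */
#if 0
static void
example_proto_drain(void)
{

        /* Release cached data (e.g. reassembly queues) back to UMA. */
}

static struct protosw example_protosw = {
        .pr_drain =     example_proto_drain,
};
#endif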