Lines matching +full:- +full:m (sys/kern/kern_mbuf.c)

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
82 * Thus common-case allocations and locking are simplified:
86 * | .------------>[(Packet Cache)] m_get(), m_gethdr()
103 * Caches are per-CPU and are filled from the Primary Zone.
106 * memory pool it gets pre-initialized with the _zinit_ functions.
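For orientation, a minimal sketch of the consumer side of these caches, using the standard entry points named above; the M_NOWAIT policy and error handling are this example's assumptions, not part of the file:

    struct mbuf *m;

    /* A single mbuf with a packet header, served from the Mbuf Cache. */
    m = m_gethdr(M_NOWAIT, MT_DATA);
    if (m == NULL)
            return (ENOBUFS);
    m_freem(m);

    /* An mbuf with a 2k cluster already attached, from the Packet Cache. */
    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
            return (ENOBUFS);
    m_freem(m);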
127 if (error == 0 && req->newptr != NULL) { in sysctl_mb_use_ext_pgs()
213 if (error == 0 && req->newptr && newnmbclusters != nmbclusters) { in sysctl_nmbclusters()
236 if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) { in sysctl_nmbjumbop()
258 if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) { in sysctl_nmbjumbo9()
280 if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) { in sysctl_nmbjumbo16()
302 if (error == 0 && req->newptr && newnmbufs != nmbufs) { in sysctl_nmbufs()
340 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
356 MSIZE - 1, UMA_ZONE_CONTIG | UMA_ZONE_MAXBUCKET); in mbuf_init()
404 * debugnet makes use of a pre-allocated pool of mbufs and clusters. When
406 * items from this pool. At panic-time, the regular UMA zone pointers are
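The import/release pairs below are the refill and drain hooks for that pool. As a hedged sketch of how such a cache zone is wired up with uma_zcache_create(9) (the exact arguments and flags in the real file may differ):

    /* Sketch: a UMA cache zone backed by the pre-allocated dn_mbufq,
     * refilled by dn_buf_import() and drained by dn_buf_release()
     * as defined below. UMA_ZONE_NOBUCKET is a plausible flag here
     * to bypass per-CPU buckets for this small fixed pool. */
    dn_zone_mbuf = uma_zcache_create("debugnet_mbuf", MSIZE,
        mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
        dn_buf_import, dn_buf_release, &dn_mbufq,
        UMA_ZONE_NOBUCKET);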
438 struct mbuf *m; in dn_buf_import() local
444 m = mbufq_dequeue(q); in dn_buf_import()
445 if (m == NULL) in dn_buf_import()
447 trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags); in dn_buf_import()
448 store[i] = m; in dn_buf_import()
451 ("%s: ran out of pre-allocated mbufs", __func__)); in dn_buf_import()
459 struct mbuf *m; in dn_buf_release() local
465 m = store[i]; in dn_buf_release()
466 (void)mbufq_enqueue(q, m); in dn_buf_release()
474 struct mbuf *m; in dn_pack_import() local
479 m = m_get(M_NOWAIT, MT_DATA); in dn_pack_import()
480 if (m == NULL) in dn_pack_import()
484 m_free(m); in dn_pack_import()
487 mb_ctor_clust(clust, dn_clsize, m, 0); in dn_pack_import()
488 store[i] = m; in dn_pack_import()
491 ("%s: ran out of pre-allocated mbufs", __func__)); in dn_pack_import()
498 struct mbuf *m; in dn_pack_release() local
503 m = store[i]; in dn_pack_release()
504 clust = m->m_ext.ext_buf; in dn_pack_release()
506 uma_zfree(dn_zone_mbuf, m); in dn_pack_release()
511 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
517 struct mbuf *m; in debugnet_mbuf_drain() local
533 while ((m = mbufq_dequeue(&dn_mbufq)) != NULL) in debugnet_mbuf_drain()
534 m_free(m); in debugnet_mbuf_drain()
562 * MTU of a debugnet-enabled interface changes. in debugnet_mbuf_start()
599 struct mbuf *m; in debugnet_mbuf_reinit() local
621 while (nmbuf-- > 0) { in debugnet_mbuf_reinit()
622 m = m_get(M_WAITOK, MT_DATA); in debugnet_mbuf_reinit()
623 uma_zfree(dn_zone_mbuf, m); in debugnet_mbuf_reinit()
625 while (nclust-- > 0) { in debugnet_mbuf_reinit()
636 * contains call-specific information required to support the
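The argument being unpacked here is a struct mb_args, defined in sys/mbuf.h, which carries the allocation flags and mbuf type through uma_zalloc_arg():

    struct mb_args {
            int     flags;  /* Flags for mbuf being allocated. */
            short   type;   /* Type of mbuf being allocated. */
    };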
642 struct mbuf *m; in mb_ctor_mbuf() local
649 type = args->type; in mb_ctor_mbuf()
658 m = (struct mbuf *)mem; in mb_ctor_mbuf()
659 flags = args->flags; in mb_ctor_mbuf()
662 error = m_init(m, how, type, flags); in mb_ctor_mbuf()
673 struct mbuf *m; in mb_dtor_mbuf() local
676 m = (struct mbuf *)mem; in mb_dtor_mbuf()
679 KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__)); in mb_dtor_mbuf()
681 if ((m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags)) in mb_dtor_mbuf()
682 m_tag_delete_chain(m, NULL); in mb_dtor_mbuf()
691 struct mbuf *m; in mb_dtor_pack() local
693 m = (struct mbuf *)mem; in mb_dtor_pack()
694 if ((m->m_flags & M_PKTHDR) != 0) in mb_dtor_pack()
695 m_tag_delete_chain(m, NULL); in mb_dtor_pack()
698 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__)); in mb_dtor_pack()
699 KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__)); in mb_dtor_pack()
700 KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__)); in mb_dtor_pack()
701 KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__)); in mb_dtor_pack()
702 KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__)); in mb_dtor_pack()
703 KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__)); in mb_dtor_pack()
704 KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__)); in mb_dtor_pack()
706 trash_dtor(m->m_ext.ext_buf, MCLBYTES, zone_clust); in mb_dtor_pack()
731 struct mbuf *m; in mb_ctor_clust() local
733 m = (struct mbuf *)arg; in mb_ctor_clust()
734 if (m != NULL) { in mb_ctor_clust()
735 m->m_ext.ext_buf = (char *)mem; in mb_ctor_clust()
736 m->m_data = m->m_ext.ext_buf; in mb_ctor_clust()
737 m->m_flags |= M_EXT; in mb_ctor_clust()
738 m->m_ext.ext_free = NULL; in mb_ctor_clust()
739 m->m_ext.ext_arg1 = NULL; in mb_ctor_clust()
740 m->m_ext.ext_arg2 = NULL; in mb_ctor_clust()
741 m->m_ext.ext_size = size; in mb_ctor_clust()
742 m->m_ext.ext_type = m_gettype(size); in mb_ctor_clust()
743 m->m_ext.ext_flags = EXT_FLAG_EMBREF; in mb_ctor_clust()
744 m->m_ext.ext_count = 1; in mb_ctor_clust()
757 struct mbuf *m; in mb_zinit_pack() local
759 m = (struct mbuf *)mem; /* m is virgin. */ in mb_zinit_pack()
760 if (uma_zalloc_arg(zone_clust, m, how) == NULL || in mb_zinit_pack()
761 m->m_ext.ext_buf == NULL) in mb_zinit_pack()
763 m->m_ext.ext_type = EXT_PACKET; /* Override. */ in mb_zinit_pack()
765 trash_init(m->m_ext.ext_buf, MCLBYTES, how); in mb_zinit_pack()
777 struct mbuf *m; in mb_zfini_pack() local
779 m = (struct mbuf *)mem; in mb_zfini_pack()
781 trash_fini(m->m_ext.ext_buf, MCLBYTES); in mb_zfini_pack()
783 uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL); in mb_zfini_pack()
795 struct mbuf *m; in mb_ctor_pack() local
800 m = (struct mbuf *)mem; in mb_ctor_pack()
802 flags = args->flags; in mb_ctor_pack()
803 type = args->type; in mb_ctor_pack()
807 trash_ctor(m->m_ext.ext_buf, MCLBYTES, zone_clust, how); in mb_ctor_pack()
810 error = m_init(m, how, type, flags); in mb_ctor_pack()
813 m->m_data = m->m_ext.ext_buf; in mb_ctor_pack()
814 m->m_flags = (flags | M_EXT); in mb_ctor_pack()
836 mb_free_notready(struct mbuf *m, int count) in mb_free_notready() argument
840 for (i = 0; i < count && m != NULL; i++) { in mb_free_notready()
841 if ((m->m_flags & M_EXTPG) != 0) { in mb_free_notready()
842 m->m_epg_nrdy--; in mb_free_notready()
843 if (m->m_epg_nrdy != 0) in mb_free_notready()
846 m = m_free(m); in mb_free_notready()
848 KASSERT(i == count, ("Removed only %d items from %p", i, m)); in mb_free_notready()
856 * be used if there are no other references to 'm'.
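A hedged sketch of a caller honoring those preconditions (the KASSERTs below enforce them; the surrounding control flow is illustrative only):

    /* Collapse a small, header-less, unmapped mbuf into ordinary
     * internal storage; mb_unmapped_compress() returns 0 on success. */
    if ((m->m_flags & (M_EXTPG | M_PKTHDR)) == M_EXTPG &&
        m->m_len <= MLEN && mb_unmapped_compress(m) == 0) {
            /* m now stores its data in m_dat and is writable. */
    }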
859 mb_unmapped_compress(struct mbuf *m) in mb_unmapped_compress() argument
865 * Assert that 'm' does not have a packet header. If 'm' had in mb_unmapped_compress()
869 KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXTPG), in mb_unmapped_compress()
870 ("%s: m %p !M_EXTPG or M_PKTHDR", __func__, m)); in mb_unmapped_compress()
871 KASSERT(m->m_len <= MLEN, ("m_len too large %p", m)); in mb_unmapped_compress()
873 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { in mb_unmapped_compress()
874 refcnt = &m->m_ext.ext_count; in mb_unmapped_compress()
876 KASSERT(m->m_ext.ext_cnt != NULL, in mb_unmapped_compress()
877 ("%s: no refcounting pointer on %p", __func__, m)); in mb_unmapped_compress()
878 refcnt = m->m_ext.ext_cnt; in mb_unmapped_compress()
884 m_copydata(m, 0, m->m_len, buf); in mb_unmapped_compress()
887 m->m_ext.ext_free(m); in mb_unmapped_compress()
889 /* Turn 'm' into a "normal" mbuf. */ in mb_unmapped_compress()
890 m->m_flags &= ~(M_EXT | M_RDONLY | M_EXTPG); in mb_unmapped_compress()
891 m->m_data = m->m_dat; in mb_unmapped_compress()
893 /* Copy data back into m. */ in mb_unmapped_compress()
894 bcopy(buf, mtod(m, char *), m->m_len); in mb_unmapped_compress()
926 mb_unmapped_free_mext(struct mbuf *m) in mb_unmapped_free_mext() argument
931 sf = m->m_ext.ext_arg1; in mb_unmapped_free_mext()
935 old_m = m->m_ext.ext_arg2; in mb_unmapped_free_mext()
940 _mb_unmapped_to_ext(struct mbuf *m, struct mbuf **mres) in _mb_unmapped_to_ext() argument
949 M_ASSERTEXTPG(m); in _mb_unmapped_to_ext()
951 if (m->m_epg_tls != NULL) { in _mb_unmapped_to_ext()
953 m_free(m); in _mb_unmapped_to_ext()
958 len = m->m_len; in _mb_unmapped_to_ext()
961 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { in _mb_unmapped_to_ext()
962 refcnt = &m->m_ext.ext_count; in _mb_unmapped_to_ext()
963 mref = m; in _mb_unmapped_to_ext()
965 KASSERT(m->m_ext.ext_cnt != NULL, in _mb_unmapped_to_ext()
966 ("%s: no refcounting pointer on %p", __func__, m)); in _mb_unmapped_to_ext()
967 refcnt = m->m_ext.ext_cnt; in _mb_unmapped_to_ext()
972 off = mtod(m, vm_offset_t); in _mb_unmapped_to_ext()
975 if (m->m_epg_hdrlen != 0) { in _mb_unmapped_to_ext()
976 if (off >= m->m_epg_hdrlen) { in _mb_unmapped_to_ext()
977 off -= m->m_epg_hdrlen; in _mb_unmapped_to_ext()
979 seglen = m->m_epg_hdrlen - off; in _mb_unmapped_to_ext()
983 len -= seglen; in _mb_unmapped_to_ext()
987 m_new->m_len = seglen; in _mb_unmapped_to_ext()
989 memcpy(mtod(m_new, void *), &m->m_epg_hdr[segoff], in _mb_unmapped_to_ext()
993 pgoff = m->m_epg_1st_off; in _mb_unmapped_to_ext()
994 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { in _mb_unmapped_to_ext()
995 pglen = m_epg_pagelen(m, i, pgoff); in _mb_unmapped_to_ext()
997 off -= pglen; in _mb_unmapped_to_ext()
1001 seglen = pglen - off; in _mb_unmapped_to_ext()
1005 len -= seglen; in _mb_unmapped_to_ext()
1007 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]); in _mb_unmapped_to_ext()
1014 prev->m_next = m_new; in _mb_unmapped_to_ext()
1023 mb_unmapped_free_mext, sf, mref, m->m_flags & M_RDONLY, in _mb_unmapped_to_ext()
1025 m_new->m_data += segoff; in _mb_unmapped_to_ext()
1026 m_new->m_len = seglen; in _mb_unmapped_to_ext()
1031 KASSERT((off + len) <= m->m_epg_trllen, in _mb_unmapped_to_ext()
1033 m->m_epg_trllen)); in _mb_unmapped_to_ext()
1040 prev->m_next = m_new; in _mb_unmapped_to_ext()
1041 m_new->m_len = len; in _mb_unmapped_to_ext()
1042 memcpy(mtod(m_new, void *), &m->m_epg_trail[off], len); in _mb_unmapped_to_ext()
1056 m_free(m); in _mb_unmapped_to_ext()
1073 m_free(m); in _mb_unmapped_to_ext()
1082 struct mbuf *m, *m1, *next, *prev = NULL; in mb_unmapped_to_ext() local
1086 for (m = top; m != NULL; m = next) { in mb_unmapped_to_ext()
1087 /* m might be freed, so cache the next pointer. */ in mb_unmapped_to_ext()
1088 next = m->m_next; in mb_unmapped_to_ext()
1089 if (m->m_flags & M_EXTPG) { in mb_unmapped_to_ext()
1092 * Remove 'm' from the new chain so in mb_unmapped_to_ext()
1094 * before 'm' in case 'top' is freed in mb_unmapped_to_ext()
1097 prev->m_next = NULL; in mb_unmapped_to_ext()
1099 error = _mb_unmapped_to_ext(m, &m1); in mb_unmapped_to_ext()
1101 if (top != m) in mb_unmapped_to_ext()
1107 m = m1; in mb_unmapped_to_ext()
1109 top = m; in mb_unmapped_to_ext()
1111 prev->m_next = m; in mb_unmapped_to_ext()
1118 prev = m_last(m); in mb_unmapped_to_ext()
1121 prev->m_next = m; in mb_unmapped_to_ext()
1123 prev = m; in mb_unmapped_to_ext()
1138 struct mbuf *m; in mb_alloc_ext_pgs() local
1140 m = m_get(how, MT_DATA); in mb_alloc_ext_pgs()
1141 if (m == NULL) in mb_alloc_ext_pgs()
1144 m->m_epg_npgs = 0; in mb_alloc_ext_pgs()
1145 m->m_epg_nrdy = 0; in mb_alloc_ext_pgs()
1146 m->m_epg_1st_off = 0; in mb_alloc_ext_pgs()
1147 m->m_epg_last_len = 0; in mb_alloc_ext_pgs()
1148 m->m_epg_flags = 0; in mb_alloc_ext_pgs()
1149 m->m_epg_hdrlen = 0; in mb_alloc_ext_pgs()
1150 m->m_epg_trllen = 0; in mb_alloc_ext_pgs()
1151 m->m_epg_tls = NULL; in mb_alloc_ext_pgs()
1152 m->m_epg_so = NULL; in mb_alloc_ext_pgs()
1153 m->m_data = NULL; in mb_alloc_ext_pgs()
1154 m->m_flags |= M_EXT | M_EXTPG | flags; in mb_alloc_ext_pgs()
1155 m->m_ext.ext_flags = EXT_FLAG_EMBREF; in mb_alloc_ext_pgs()
1156 m->m_ext.ext_count = 1; in mb_alloc_ext_pgs()
1157 m->m_ext.ext_size = 0; in mb_alloc_ext_pgs()
1158 m->m_ext.ext_free = ext_free; in mb_alloc_ext_pgs()
1159 return (m); in mb_alloc_ext_pgs()
1167 mb_free_ext(struct mbuf *m) in mb_free_ext() argument
1173 KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m)); in mb_free_ext()
1176 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { in mb_free_ext()
1177 refcnt = &m->m_ext.ext_count; in mb_free_ext()
1178 mref = m; in mb_free_ext()
1180 KASSERT(m->m_ext.ext_cnt != NULL, in mb_free_ext()
1181 ("%s: no refcounting pointer on %p", __func__, m)); in mb_free_ext()
1182 refcnt = m->m_ext.ext_cnt; in mb_free_ext()
1194 if (m->m_flags & M_NOFREE) { in mb_free_ext()
1196 KASSERT(m->m_ext.ext_type == EXT_EXTREF || in mb_free_ext()
1197 m->m_ext.ext_type == EXT_RXRING, in mb_free_ext()
1198 ("%s: no-free mbuf %p has wrong type", __func__, m)); in mb_free_ext()
1203 if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) { in mb_free_ext()
1204 switch (m->m_ext.ext_type) { in mb_free_ext()
1212 uma_zfree(zone_clust, m->m_ext.ext_buf); in mb_free_ext()
1216 uma_zfree(zone_jumbop, m->m_ext.ext_buf); in mb_free_ext()
1220 uma_zfree(zone_jumbo9, m->m_ext.ext_buf); in mb_free_ext()
1224 uma_zfree(zone_jumbo16, m->m_ext.ext_buf); in mb_free_ext()
1232 KASSERT(mref->m_ext.ext_free != NULL, in mb_free_ext()
1234 mref->m_ext.ext_free(mref); in mb_free_ext()
1238 KASSERT(m->m_ext.ext_free != NULL, in mb_free_ext()
1240 m->m_ext.ext_free(m); in mb_free_ext()
1243 KASSERT(m->m_ext.ext_free == NULL, in mb_free_ext()
1247 KASSERT(m->m_ext.ext_type == 0, in mb_free_ext()
1252 if (freembuf && m != mref) in mb_free_ext()
1253 m_free_raw(m); in mb_free_ext()
1261 mb_free_extpg(struct mbuf *m) in mb_free_extpg() argument
1266 M_ASSERTEXTPG(m); in mb_free_extpg()
1269 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { in mb_free_extpg()
1270 refcnt = &m->m_ext.ext_count; in mb_free_extpg()
1271 mref = m; in mb_free_extpg()
1273 KASSERT(m->m_ext.ext_cnt != NULL, in mb_free_extpg()
1274 ("%s: no refcounting pointer on %p", __func__, m)); in mb_free_extpg()
1275 refcnt = m->m_ext.ext_cnt; in mb_free_extpg()
1280 if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) { in mb_free_extpg()
1281 KASSERT(mref->m_ext.ext_free != NULL, in mb_free_extpg()
1284 mref->m_ext.ext_free(mref); in mb_free_extpg()
1286 if (mref->m_epg_tls != NULL && in mb_free_extpg()
1287 !refcount_release_if_not_last(&mref->m_epg_tls->refcount)) in mb_free_extpg()
1294 if (m != mref) in mb_free_extpg()
1295 m_free_raw(m); in mb_free_extpg()
1301 * m_get() - a single mbuf without any attachments, sys/mbuf.h.
1302 * m_gethdr() - a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
1303 * m_getcl() - an mbuf + 2k cluster, sys/mbuf.h.
1304 * m_clget() - attach cluster to already allocated mbuf.
1305 * m_cljget() - attach jumbo cluster to already allocated mbuf.
1306 * m_get2() - allocate minimum mbuf that would fit size argument.
1307 * m_getm2() - allocate a chain of mbufs/clusters.
1308 * m_extadd() - attach external cluster to mbuf.
1310 * m_free() - free single mbuf with its tags and ext, sys/mbuf.h.
1311 * m_freem() - free chain of mbufs.
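A hedged usage sketch combining two of the entry points listed above; 'len' and 'src' are assumed inputs of the example, not names from this file:

    struct mbuf *m;

    /* Pick the smallest mbuf/cluster layout that fits 'len'. */
    m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
            return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = len;
    memcpy(mtod(m, void *), src, len);
    m_freem(m);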
1315 m_clget(struct mbuf *m, int how) in m_clget() argument
1318 KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT", in m_clget()
1319 __func__, m)); in m_clget()
1320 m->m_ext.ext_buf = (char *)NULL; in m_clget()
1321 uma_zalloc_arg(zone_clust, m, how); in m_clget()
1326 if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) { in m_clget()
1328 uma_zalloc_arg(zone_clust, m, how); in m_clget()
1330 MBUF_PROBE2(m__clget, m, how); in m_clget()
1331 return (m->m_flags & M_EXT); in m_clget()
1342 m_cljget(struct mbuf *m, int how, int size) in m_cljget() argument
1347 if (m != NULL) { in m_cljget()
1348 KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT", in m_cljget()
1349 __func__, m)); in m_cljget()
1350 m->m_ext.ext_buf = NULL; in m_cljget()
1354 retval = uma_zalloc_arg(zone, m, how); in m_cljget()
1356 MBUF_PROBE4(m__cljget, m, how, size, retval); in m_cljget()
1368 struct mbuf *m, *n; in m_get2() local
1381 m = uma_zalloc_arg(zone_mbuf, &args, how); in m_get2()
1382 if (m == NULL) in m_get2()
1385 n = uma_zalloc_arg(zone_jumbop, m, how); in m_get2()
1387 m_free_raw(m); in m_get2()
1391 return (m); in m_get2()
1402 struct mbuf *m, *n; in m_get3() local
1414 m = uma_zalloc_arg(zone_mbuf, &args, how); in m_get3()
1415 if (m == NULL) in m_get3()
1423 n = uma_zalloc_arg(zone, m, how); in m_get3()
1425 m_free_raw(m); in m_get3()
1429 return (m); in m_get3()
1440 struct mbuf *m, *n; in m_getjcl() local
1449 m = uma_zalloc_arg(zone_mbuf, &args, how); in m_getjcl()
1450 if (m == NULL) in m_getjcl()
1454 n = uma_zalloc_arg(zone, m, how); in m_getjcl()
1456 m_free_raw(m); in m_getjcl()
1459 MBUF_PROBE5(m__getjcl, how, type, flags, size, m); in m_getjcl()
1460 return (m); in m_getjcl()
1482 if (length - progress > MCLBYTES) { in mc_get()
1493 if (length - progress >= MINCLSIZE) in mc_get()
1517 mc_last(mc)->m_flags |= M_EOR; in mc_get()
1529 m_getm2(struct mbuf *m, int len, int how, short type, int flags) in m_getm2() argument
1534 if (m != NULL && (flags & M_PKTHDR)) in m_getm2()
1541 if (m != NULL) { in m_getm2()
1544 mtail = m_last(m); in m_getm2()
1545 mtail->m_next = mc_first(&mc); in m_getm2()
1546 mtail->m_flags &= ~M_EOR; in m_getm2()
1548 m = mc_first(&mc); in m_getm2()
1550 return (m); in m_getm2()
1553 /*-
1579 mb->m_flags |= (M_EXT | flags); in m_extadd()
1580 mb->m_ext.ext_buf = buf; in m_extadd()
1581 mb->m_data = mb->m_ext.ext_buf; in m_extadd()
1582 mb->m_ext.ext_size = size; in m_extadd()
1583 mb->m_ext.ext_free = freef; in m_extadd()
1584 mb->m_ext.ext_arg1 = arg1; in m_extadd()
1585 mb->m_ext.ext_arg2 = arg2; in m_extadd()
1586 mb->m_ext.ext_type = type; in m_extadd()
1589 mb->m_ext.ext_count = 1; in m_extadd()
1590 mb->m_ext.ext_flags = EXT_FLAG_EMBREF; in m_extadd()
1592 mb->m_ext.ext_flags = 0; in m_extadd()
1614 m_freemp(struct mbuf *m) in m_freemp() argument
1618 MBUF_PROBE1(m__freemp, m); in m_freemp()
1620 n = m->m_nextpkt; in m_freemp()
1621 while (m != NULL) in m_freemp()
1622 m = m_free(m); in m_freemp()
1623 m = n; in m_freemp()
1624 } while (m != NULL); in m_freemp()
1651 mst->ifp = ifp; in m_snd_tag_init()
1652 refcount_init(&mst->refcount, 1); in m_snd_tag_init()
1653 mst->sw = sw; in m_snd_tag_init()
1662 ifp = mst->ifp; in m_snd_tag_destroy()
1663 mst->sw->snd_tag_free(mst); in m_snd_tag_destroy()
1665 counter_u64_add(snd_tag_count, -1); in m_snd_tag_destroy()
1669 m_rcvif_serialize(struct mbuf *m) in m_rcvif_serialize() argument
1673 M_ASSERTPKTHDR(m); in m_rcvif_serialize()
1674 idx = if_getindex(m->m_pkthdr.rcvif); in m_rcvif_serialize()
1675 gen = if_getidxgen(m->m_pkthdr.rcvif); in m_rcvif_serialize()
1676 m->m_pkthdr.rcvidx = idx; in m_rcvif_serialize()
1677 m->m_pkthdr.rcvgen = gen; in m_rcvif_serialize()
1678 if (__predict_false(m->m_pkthdr.leaf_rcvif != NULL)) { in m_rcvif_serialize()
1679 idx = if_getindex(m->m_pkthdr.leaf_rcvif); in m_rcvif_serialize()
1680 gen = if_getidxgen(m->m_pkthdr.leaf_rcvif); in m_rcvif_serialize()
1682 idx = -1; in m_rcvif_serialize()
1685 m->m_pkthdr.leaf_rcvidx = idx; in m_rcvif_serialize()
1686 m->m_pkthdr.leaf_rcvgen = gen; in m_rcvif_serialize()
1690 m_rcvif_restore(struct mbuf *m) in m_rcvif_restore() argument
1694 M_ASSERTPKTHDR(m); in m_rcvif_restore()
1697 ifp = ifnet_byindexgen(m->m_pkthdr.rcvidx, m->m_pkthdr.rcvgen); in m_rcvif_restore()
1701 if (__predict_true(m->m_pkthdr.leaf_rcvidx == (u_short)-1)) { in m_rcvif_restore()
1704 leaf_ifp = ifnet_byindexgen(m->m_pkthdr.leaf_rcvidx, in m_rcvif_restore()
1705 m->m_pkthdr.leaf_rcvgen); in m_rcvif_restore()
1710 m->m_pkthdr.leaf_rcvif = leaf_ifp; in m_rcvif_restore()
1711 m->m_pkthdr.rcvif = ifp; in m_rcvif_restore()
1722 struct mbuf *m; in mb_alloc_ext_plus_pages() local
1726 m = mb_alloc_ext_pgs(how, mb_free_mext_pgs, 0); in mb_alloc_ext_plus_pages()
1727 if (m == NULL) in mb_alloc_ext_plus_pages()
1729 m->m_epg_flags |= EPG_FLAG_ANON; in mb_alloc_ext_plus_pages()
1737 m->m_epg_npgs = i; in mb_alloc_ext_plus_pages()
1738 m_free(m); in mb_alloc_ext_plus_pages()
1744 m->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg); in mb_alloc_ext_plus_pages()
1746 m->m_epg_npgs = npgs; in mb_alloc_ext_plus_pages()
1747 return (m); in mb_alloc_ext_plus_pages()
1760 struct mbuf *m, *mout; in mb_mapped_to_unmapped() local
1767 m = mout = mb_alloc_ext_plus_pages(mbufsiz, how); in mb_mapped_to_unmapped()
1768 if (m == NULL) in mb_mapped_to_unmapped()
1769 return (m); in mb_mapped_to_unmapped()
1770 pgpos = (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[0]); in mb_mapped_to_unmapped()
1776 if (++i == m->m_epg_npgs) { in mb_mapped_to_unmapped()
1777 m->m_epg_last_len = PAGE_SIZE; in mb_mapped_to_unmapped()
1779 m->m_next = mb_alloc_ext_plus_pages(mbufsiz, in mb_mapped_to_unmapped()
1781 m = m->m_next; in mb_mapped_to_unmapped()
1782 if (m == NULL) { in mb_mapped_to_unmapped()
1784 return (m); in mb_mapped_to_unmapped()
1788 pgpos = (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]); in mb_mapped_to_unmapped()
1796 KASSERT((mp->m_flags & M_EXTPG) == 0, in mb_mapped_to_unmapped()
1799 mblen = mp->m_len; in mb_mapped_to_unmapped()
1800 mp = mp->m_next; in mb_mapped_to_unmapped()
1806 pglen -= xfer; in mb_mapped_to_unmapped()
1807 mblen -= xfer; in mb_mapped_to_unmapped()
1808 len -= xfer; in mb_mapped_to_unmapped()
1809 m->m_len += xfer; in mb_mapped_to_unmapped()
1811 m->m_epg_last_len = PAGE_SIZE - pglen; in mb_mapped_to_unmapped()
1813 *mlast = m; in mb_mapped_to_unmapped()