Lines matching +full:0 +full:m — matched lines from FreeBSD's sys/kern/uipc_mbuf.c. Each entry shows the file line number, the matched source line, and the enclosing function (with the matched symbol's role, e.g. "argument" or "local", where the indexer reports it). Only the matched lines appear, so the listing is not contiguous source.

170 	   &m_defragpackets, 0, "");
172 &m_defragbytes, 0, "");
174 &m_defraguseless, 0, "");
176 &m_defragfailure, 0, "");
178 &m_defragrandomfailures, 0, "");
192 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
193 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
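The two CTASSERTs above (file lines 192-193) check at compile time that an mbuf's internal data areas start on an 8-byte boundary. A minimal user-space equivalent of that kind of check, using C11 _Static_assert and offsetof on a made-up structure (struct example_hdr and its fields are hypothetical, not the kernel's layout):

#include <stddef.h>

/* A stand-in structure; the kernel asserts the same property on struct mbuf. */
struct example_hdr {
    void   *ptr;
    long    len;
    char    payload[64];
};

/* Fails the build if the payload does not start on an 8-byte boundary. */
_Static_assert(offsetof(struct example_hdr, payload) % 8 == 0,
    "payload must start on an 8-byte boundary");

int main(void) { return 0; }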
234 * Attach the cluster from *m to *n, set up m_ext in *n
238 mb_dupcl(struct mbuf *n, struct mbuf *m) in mb_dupcl() argument
242 KASSERT(m->m_flags & (M_EXT | M_EXTPG), in mb_dupcl()
243 ("%s: M_EXT | M_EXTPG not set on %p", __func__, m)); in mb_dupcl()
259 if (m->m_flags & M_EXTPG) { in mb_dupcl()
260 bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy, in mb_dupcl()
262 bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen); in mb_dupcl()
263 } else if (m->m_ext.ext_type == EXT_EXTREF) in mb_dupcl()
264 bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext)); in mb_dupcl()
266 bcopy(&m->m_ext, &n->m_ext, m_ext_copylen); in mb_dupcl()
268 n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG); in mb_dupcl()
271 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { in mb_dupcl()
272 refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count; in mb_dupcl()
275 KASSERT(m->m_ext.ext_cnt != NULL, in mb_dupcl()
276 ("%s: no refcounting pointer on %p", __func__, m)); in mb_dupcl()
277 refcnt = m->m_ext.ext_cnt; in mb_dupcl()
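mb_dupcl() lets a second mbuf n reference the external storage already attached to m: it copies the m_ext descriptor (or the larger M_EXTPG header), propagates M_RDONLY/M_EXT/M_EXTPG, and then bumps a reference count that lives either inside the mbuf itself (EXT_FLAG_EMBREF) or behind ext_cnt. A rough user-space sketch of that sharing pattern, with made-up names (struct cluster, struct buf_ref, dup_cluster, drop_cluster) and a C11 atomic standing in for the kernel's refcount helpers:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for an external cluster and the mbufs sharing it. */
struct cluster {
    atomic_uint refcount;
    char        data[2048];
};

struct buf_ref {
    struct cluster *clu;    /* shared storage, like m_ext.ext_buf */
    char           *data;   /* view into the cluster, like m_data */
    size_t          len;
};

/* Analogue of mb_dupcl(): n borrows m's cluster and bumps the refcount. */
static void
dup_cluster(struct buf_ref *n, struct buf_ref *m)
{
    atomic_fetch_add(&m->clu->refcount, 1);
    n->clu = m->clu;
    n->data = m->data;
    n->len = m->len;
}

/* Analogue of freeing an mbuf with M_EXT: the last reference frees storage. */
static void
drop_cluster(struct buf_ref *r)
{
    if (atomic_fetch_sub(&r->clu->refcount, 1) == 1)
        free(r->clu);
    r->clu = NULL;
}

int
main(void)
{
    struct cluster *c = malloc(sizeof(*c));
    atomic_init(&c->refcount, 1);
    strcpy(c->data, "shared payload");

    struct buf_ref m = { c, c->data, 14 }, n;
    dup_cluster(&n, &m);                    /* two views, one cluster */
    printf("%s / %s\n", m.data, n.data);
    drop_cluster(&m);
    drop_cluster(&n);                       /* frees the cluster here */
    return 0;
}

The kernel additionally distinguishes an embedded counter from one reached through ext_cnt so that common clusters avoid a separate refcount allocation; the sketch keeps the count inside the cluster for brevity.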
287 m_demote_pkthdr(struct mbuf *m) in m_demote_pkthdr() argument
290 M_ASSERTPKTHDR(m); in m_demote_pkthdr()
291 M_ASSERT_NO_SND_TAG(m); in m_demote_pkthdr()
293 m_tag_delete_chain(m, NULL); in m_demote_pkthdr()
294 m->m_flags &= ~M_PKTHDR; in m_demote_pkthdr()
295 bzero(&m->m_pkthdr, sizeof(struct pkthdr)); in m_demote_pkthdr()
306 struct mbuf *m; in m_demote() local
310 for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) { in m_demote()
311 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p", in m_demote()
312 __func__, m, m0)); in m_demote()
313 if (m->m_flags & M_PKTHDR) in m_demote()
314 m_demote_pkthdr(m); in m_demote()
315 m->m_flags &= flags; in m_demote()
322 * Returns 0 or panics when bad and 1 on all tests passed.
323 * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
329 struct mbuf *m; in m_sanity() local
331 int pktlen = 0; in m_sanity()
334 #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m) in m_sanity()
336 #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m) in m_sanity()
339 for (m = m0; m != NULL; m = m->m_next) { in m_sanity()
345 a = M_START(m); in m_sanity()
346 b = a + M_SIZE(m); in m_sanity()
347 if ((caddr_t)m->m_data < a) in m_sanity()
349 if ((caddr_t)m->m_data > b) in m_sanity()
351 if ((caddr_t)m->m_data + m->m_len > b) in m_sanity()
354 /* m->m_nextpkt may only be set on first mbuf in chain. */ in m_sanity()
355 if (m != m0 && m->m_nextpkt != NULL) { in m_sanity()
357 m_freem(m->m_nextpkt); in m_sanity()
358 m->m_nextpkt = (struct mbuf *)0xDEADC0DE; in m_sanity()
360 M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf"); in m_sanity()
365 pktlen += m->m_len; in m_sanity()
368 if (m != m0 && m->m_flags & M_PKTHDR && in m_sanity()
369 !SLIST_EMPTY(&m->m_pkthdr.tags)) { in m_sanity()
371 m_tag_delete_chain(m, NULL); in m_sanity()
372 /* put in 0xDEADC0DE perhaps? */ in m_sanity()
378 if (m != m0 && m->m_flags & M_PKTHDR) { in m_sanity()
380 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr)); in m_sanity()
381 m->m_flags &= ~M_PKTHDR; in m_sanity()
382 /* put in 0xDEADCODE and leave hdr flag in */ in m_sanity()
387 m = m0; in m_sanity()
388 if (pktlen && pktlen != m->m_pkthdr.len) { in m_sanity()
390 m->m_pkthdr.len = 0; in m_sanity()
403 m_pkthdr_init(struct mbuf *m, int how) in m_pkthdr_init() argument
408 m->m_data = m->m_pktdat; in m_pkthdr_init()
409 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr)); in m_pkthdr_init()
411 m->m_pkthdr.numa_domain = M_NODOM; in m_pkthdr_init()
415 error = mac_mbuf_init(m, how); in m_pkthdr_init()
420 return (0); in m_pkthdr_init()
431 #if 0 in m_move_pkthdr()
447 if ((to->m_flags & M_EXT) == 0) in m_move_pkthdr()
467 #if 0 in m_dup_pkthdr()
486 if ((to->m_flags & M_EXT) == 0) in m_dup_pkthdr()
501 m_prepend(struct mbuf *m, int len, int how) in m_prepend() argument
505 if (m->m_flags & M_PKTHDR) in m_prepend()
506 mn = m_gethdr(how, m->m_type); in m_prepend()
508 mn = m_get(how, m->m_type); in m_prepend()
510 m_freem(m); in m_prepend()
513 if (m->m_flags & M_PKTHDR) in m_prepend()
514 m_move_pkthdr(mn, m); in m_prepend()
515 mn->m_next = m; in m_prepend()
516 m = mn; in m_prepend()
517 if (len < M_SIZE(m)) in m_prepend()
518 M_ALIGN(m, len); in m_prepend()
519 m->m_len = len; in m_prepend()
520 return (m); in m_prepend()
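m_prepend() grows a chain at the front: it allocates a fresh mbuf (a packet-header one when the old head had M_PKTHDR), frees the whole chain if that allocation fails, moves the packet header over, links the old head behind the new one, and uses M_ALIGN so the requested len bytes sit at the end of the new buffer, leaving room for further prepends. A toy version of just the head-insertion part over a made-up chain type (struct node and chain_prepend are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical chain node standing in for an mbuf. */
struct node {
    struct node *next;
    size_t       len;
    char         data[256];
};

/* Analogue of m_prepend(): a new head node carrying 'len' bytes of room. */
static struct node *
chain_prepend(struct node *head, size_t len)
{
    if (len > sizeof(((struct node *)0)->data))
        return NULL;
    struct node *n = calloc(1, sizeof(*n));
    if (n == NULL)
        return NULL;        /* the kernel version frees the chain here */
    n->next = head;
    n->len = len;
    return n;
}

int
main(void)
{
    struct node *payload = calloc(1, sizeof(*payload));
    payload->len = 5;
    memcpy(payload->data, "hello", 5);

    struct node *head = chain_prepend(payload, 14);    /* room for a header */
    printf("head carries %zu bytes, next carries %zu\n",
        head->len, head->next->len);
    free(head->next);
    free(head);
    return 0;
}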
531 m_copym(struct mbuf *m, int off0, int len, int wait) in m_copym() argument
536 int copyhdr = 0; in m_copym()
538 KASSERT(off >= 0, ("m_copym, negative off %d", off)); in m_copym()
539 KASSERT(len >= 0, ("m_copym, negative len %d", len)); in m_copym()
541 if (off == 0 && m->m_flags & M_PKTHDR) in m_copym()
543 while (off > 0) { in m_copym()
544 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain")); in m_copym()
545 if (off < m->m_len) in m_copym()
547 off -= m->m_len; in m_copym()
548 m = m->m_next; in m_copym()
552 while (len > 0) { in m_copym()
553 if (m == NULL) { in m_copym()
559 n = m_gethdr(wait, m->m_type); in m_copym()
561 n = m_get(wait, m->m_type); in m_copym()
566 if (!m_dup_pkthdr(n, m, wait)) in m_copym()
572 copyhdr = 0; in m_copym()
574 n->m_len = min(len, m->m_len - off); in m_copym()
575 if (m->m_flags & (M_EXT | M_EXTPG)) { in m_copym()
576 n->m_data = m->m_data + off; in m_copym()
577 mb_dupcl(n, m); in m_copym()
579 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), in m_copym()
583 off = 0; in m_copym()
584 m = m->m_next; in m_copym()
596 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
604 m_copypacket(struct mbuf *m, int how) in m_copypacket() argument
609 n = m_get(how, m->m_type); in m_copypacket()
614 if (!m_dup_pkthdr(n, m, how)) in m_copypacket()
616 n->m_len = m->m_len; in m_copypacket()
617 if (m->m_flags & (M_EXT | M_EXTPG)) { in m_copypacket()
618 n->m_data = m->m_data; in m_copypacket()
619 mb_dupcl(n, m); in m_copypacket()
621 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat ); in m_copypacket()
622 bcopy(mtod(m, char *), mtod(n, char *), n->m_len); in m_copypacket()
625 m = m->m_next; in m_copypacket()
626 while (m) { in m_copypacket()
627 o = m_get(how, m->m_type); in m_copypacket()
634 n->m_len = m->m_len; in m_copypacket()
635 if (m->m_flags & (M_EXT | M_EXTPG)) { in m_copypacket()
636 n->m_data = m->m_data; in m_copypacket()
637 mb_dupcl(n, m); in m_copypacket()
639 bcopy(mtod(m, char *), mtod(n, char *), n->m_len); in m_copypacket()
642 m = m->m_next; in m_copypacket()
651 m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp) in m_copyfromunmapped() argument
657 KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off)); in m_copyfromunmapped()
658 KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len)); in m_copyfromunmapped()
659 KASSERT(off < m->m_len, in m_copyfromunmapped()
667 uio.uio_offset = 0; in m_copyfromunmapped()
669 error = m_unmapped_uiomove(m, off, &uio, len); in m_copyfromunmapped()
670 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off, in m_copyfromunmapped()
679 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp) in m_copydata() argument
683 KASSERT(off >= 0, ("m_copydata, negative off %d", off)); in m_copydata()
684 KASSERT(len >= 0, ("m_copydata, negative len %d", len)); in m_copydata()
685 while (off > 0) { in m_copydata()
686 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain")); in m_copydata()
687 if (off < m->m_len) in m_copydata()
689 off -= m->m_len; in m_copydata()
690 m = m->m_next; in m_copydata()
692 while (len > 0) { in m_copydata()
693 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain")); in m_copydata()
694 count = min(m->m_len - off, len); in m_copydata()
695 if ((m->m_flags & M_EXTPG) != 0) in m_copydata()
696 m_copyfromunmapped(m, off, count, cp); in m_copydata()
698 bcopy(mtod(m, caddr_t) + off, cp, count); in m_copydata()
701 off = 0; in m_copydata()
702 m = m->m_next; in m_copydata()
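m_copydata() (like the chain walk at the top of m_copym() above) uses a two-phase pattern: first consume off bytes by skipping whole mbufs, then copy min(m_len - off, len) out of each mbuf until len is exhausted; unmapped (M_EXTPG) mbufs detour through m_copyfromunmapped(). The mapped path boils down to the following, shown over a made-up chain type (struct node and chain_copydata are illustrative):

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct node {                 /* hypothetical stand-in for struct mbuf */
    struct node *next;
    size_t       len;
    const char  *data;
};

/* Analogue of m_copydata(): copy 'len' bytes starting at 'off' into 'cp'. */
static void
chain_copydata(const struct node *m, size_t off, size_t len, char *cp)
{
    while (off > 0) {                    /* skip whole leading segments */
        assert(m != NULL);
        if (off < m->len)
            break;
        off -= m->len;
        m = m->next;
    }
    while (len > 0) {                    /* copy piecewise from here on */
        assert(m != NULL);
        size_t count = m->len - off;
        if (count > len)
            count = len;
        memcpy(cp, m->data + off, count);
        cp += count;
        len -= count;
        off = 0;
        m = m->next;
    }
}

int
main(void)
{
    struct node c = { NULL, 6, "world!" };
    struct node b = { &c,   1, " "      };
    struct node a = { &b,   5, "hello"  };
    char out[8] = { 0 };

    chain_copydata(&a, 4, 7, out);       /* spans all three segments */
    printf("%s\n", out);                 /* prints "o world" */
    return 0;
}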
712 m_dup(const struct mbuf *m, int how) in m_dup() argument
719 if (m == NULL) in m_dup()
721 M_ASSERTPKTHDR(m); in m_dup()
724 remain = m->m_pkthdr.len; in m_dup()
725 moff = 0; in m_dup()
727 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */ in m_dup()
732 n = m_getcl(how, m->m_type, 0); in m_dup()
735 n = m_get(how, m->m_type); in m_dup()
742 if (!m_dup_pkthdr(n, m, how)) { in m_dup()
746 if ((n->m_flags & M_EXT) == 0) in m_dup()
750 n->m_len = 0; in m_dup()
757 while (n->m_len < nsize && m != NULL) { in m_dup()
758 int chunk = min(nsize - n->m_len, m->m_len - moff); in m_dup()
760 m_copydata(m, moff, chunk, n->m_data + n->m_len); in m_dup()
764 if (moff == m->m_len) { in m_dup()
765 m = m->m_next; in m_dup()
766 moff = 0; in m_dup()
771 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL), in m_dup()
782 * Concatenate mbuf chain n to m.
787 m_cat(struct mbuf *m, struct mbuf *n) in m_cat() argument
789 while (m->m_next) in m_cat()
790 m = m->m_next; in m_cat()
792 if (!M_WRITABLE(m) || in m_cat()
793 (n->m_flags & M_EXTPG) != 0 || in m_cat()
794 M_TRAILINGSPACE(m) < n->m_len) { in m_cat()
796 m->m_next = n; in m_cat()
800 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, in m_cat()
802 m->m_len += n->m_len; in m_cat()
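m_cat() walks to the last mbuf of m and then absorbs mbufs from n one at a time into m's trailing space, splicing the remainder of n onto the chain as soon as m is not writable, the next mbuf is unmapped (M_EXTPG), or the data no longer fits (the splice at file line 796). A single-node sketch of that decision, with hypothetical names (SEG_CAP, struct node, chain_cat):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SEG_CAP 16                      /* hypothetical per-node capacity */

struct node {
    struct node *next;
    size_t       len;
    char         data[SEG_CAP];
};

/* Analogue of m_cat() for a single trailing node: absorb 'n' into the tail
 * of 'm' when it fits, otherwise just link it onto the end of the chain. */
static void
chain_cat(struct node *m, struct node *n)
{
    while (m->next != NULL)
        m = m->next;
    if (SEG_CAP - m->len < n->len) {    /* not enough trailing space */
        m->next = n;
        return;
    }
    memcpy(m->data + m->len, n->data, n->len);
    m->len += n->len;
    free(n);                            /* n's contents now live in m */
}

int
main(void)
{
    struct node *a = calloc(1, sizeof(*a));
    struct node *b = calloc(1, sizeof(*b));
    a->len = 5;  memcpy(a->data, "hello", 5);
    b->len = 6;  memcpy(b->data, " world", 6);

    chain_cat(a, b);                    /* fits: b is absorbed and freed */
    printf("%zu bytes, chained=%s\n", a->len, a->next ? "yes" : "no");
    free(a);
    return 0;
}

The real function repeats this per mbuf of n, so a long second chain is absorbed piecewise until something forces a splice.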
811 m_catpkt(struct mbuf *m, struct mbuf *n) in m_catpkt() argument
814 M_ASSERTPKTHDR(m); in m_catpkt()
817 m->m_pkthdr.len += n->m_pkthdr.len; in m_catpkt()
818 m_demote(n, 1, 0); in m_catpkt()
820 m_cat(m, n); in m_catpkt()
827 struct mbuf *m; in m_adj() local
830 if ((m = mp) == NULL) in m_adj()
832 if (len >= 0) { in m_adj()
836 while (m != NULL && len > 0) { in m_adj()
837 if (m->m_len <= len) { in m_adj()
838 len -= m->m_len; in m_adj()
839 m->m_len = 0; in m_adj()
840 m = m->m_next; in m_adj()
842 m->m_len -= len; in m_adj()
843 m->m_data += len; in m_adj()
844 len = 0; in m_adj()
858 count = 0; in m_adj()
860 count += m->m_len; in m_adj()
861 if (m->m_next == (struct mbuf *)0) in m_adj()
863 m = m->m_next; in m_adj()
865 if (m->m_len >= len) { in m_adj()
866 m->m_len -= len; in m_adj()
872 if (count < 0) in m_adj()
873 count = 0; in m_adj()
879 m = mp; in m_adj()
880 if (m->m_flags & M_PKTHDR) in m_adj()
881 m->m_pkthdr.len = count; in m_adj()
882 for (; m; m = m->m_next) { in m_adj()
883 if (m->m_len >= count) { in m_adj()
884 m->m_len = count; in m_adj()
885 if (m->m_next != NULL) { in m_adj()
886 m_freem(m->m_next); in m_adj()
887 m->m_next = NULL; in m_adj()
891 count -= m->m_len; in m_adj()
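m_adj() trims a requested number of bytes off a chain: a positive count comes off the front, shrinking or emptying leading mbufs and advancing m_data, while a negative count comes off the back, which first measures the chain, then truncates the mbuf containing the new end and frees everything after it, finally fixing up m_pkthdr.len. The head-trim loop alone, over a made-up chain (struct node and chain_trim_head are illustrative):

#include <stdio.h>

struct node {                 /* hypothetical stand-in for struct mbuf */
    struct node *next;
    size_t       len;
    char        *data;
};

/* Analogue of the len >= 0 branch of m_adj(): drop 'len' bytes from the
 * front of the chain without freeing any nodes. */
static void
chain_trim_head(struct node *m, size_t len)
{
    while (m != NULL && len > 0) {
        if (m->len <= len) {          /* consume this whole segment */
            len -= m->len;
            m->len = 0;
            m = m->next;
        } else {                      /* partial: advance into the data */
            m->len -= len;
            m->data += len;
            len = 0;
        }
    }
}

int
main(void)
{
    char s1[] = "hello", s2[] = " world";
    struct node b = { NULL, 6, s2 };
    struct node a = { &b,   5, s1 };

    chain_trim_head(&a, 7);           /* drop "hello w" */
    printf("first=%zu, second=%.*s\n", a.len, (int)b.len, b.data);
    return 0;
}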
902 if ((mp->m_flags & M_PKTHDR) != 0) { in m_adj_decap()
911 if ((rsstype & M_HASHTYPE_INNER) != 0) { in m_adj_decap()
930 struct mbuf *m; in m_pullup() local
934 KASSERT((n->m_flags & M_EXTPG) == 0, in m_pullup()
942 if ((n->m_flags & M_EXT) == 0 && in m_pullup()
946 m = n; in m_pullup()
948 len -= m->m_len; in m_pullup()
952 m = m_get(M_NOWAIT, n->m_type); in m_pullup()
953 if (m == NULL) in m_pullup()
956 m_move_pkthdr(m, n); in m_pullup()
958 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); in m_pullup()
961 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, in m_pullup()
964 m->m_len += count; in m_pullup()
971 } while (len > 0 && n); in m_pullup()
972 if (len > 0) { in m_pullup()
973 (void) m_free(m); in m_pullup()
976 m->m_next = n; in m_pullup()
977 return (m); in m_pullup()
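m_pullup() guarantees that the first len bytes of the chain are contiguous in one mapped mbuf: it reuses the existing head when the data already fits, otherwise it allocates a new head, moves the packet header, and keeps copying bytes forward from successor mbufs (freeing the ones it empties) until len bytes are in place; if that cannot be done it frees the chain and returns NULL. A flat-array sketch of the copy-forward loop with invented names (SEG_CAP, struct node, chain_pullup):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SEG_CAP 32                    /* hypothetical per-node capacity */

struct node {
    struct node *next;
    size_t       len;
    char         data[SEG_CAP];
};

/* Analogue of m_pullup(): make the first 'len' bytes contiguous in the head
 * node by pulling bytes forward from its successors.  Returns NULL when
 * 'len' cannot be satisfied (the kernel version frees the chain then). */
static struct node *
chain_pullup(struct node *n, size_t len)
{
    if (len > SEG_CAP)
        return NULL;
    while (n->len < len && n->next != NULL) {
        struct node *src = n->next;
        size_t want = len - n->len;
        size_t take = src->len < want ? src->len : want;

        memcpy(n->data + n->len, src->data, take);
        n->len += take;
        src->len -= take;
        memmove(src->data, src->data + take, src->len);
        if (src->len == 0) {          /* emptied: unlink and free it */
            n->next = src->next;
            free(src);
        }
    }
    return (n->len >= len) ? n : NULL;
}

int
main(void)
{
    struct node *b = calloc(1, sizeof(*b));
    struct node *a = calloc(1, sizeof(*a));
    a->next = b;
    a->len = 3;  memcpy(a->data, "hel", 3);
    b->len = 8;  memcpy(b->data, "lo world", 8);

    struct node *h = chain_pullup(a, 8);    /* "hello wo" now contiguous */
    printf("%.*s\n", (int)h->len, h->data);
    free(h->next);
    free(h);
    return 0;
}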
991 struct mbuf *m; in m_copyup() local
996 m = m_get(M_NOWAIT, n->m_type); in m_copyup()
997 if (m == NULL) in m_copyup()
1000 m_move_pkthdr(m, n); in m_copyup()
1001 m->m_data += dstoff; in m_copyup()
1002 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); in m_copyup()
1005 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), in m_copyup()
1008 m->m_len += count; in m_copyup()
1015 } while (len > 0 && n); in m_copyup()
1016 if (len > 0) { in m_copyup()
1017 (void) m_free(m); in m_copyup()
1020 m->m_next = n; in m_copyup()
1021 return (m); in m_copyup()
1040 struct mbuf *m, *n; in m_split() local
1044 for (m = m0; m && len > m->m_len; m = m->m_next) in m_split()
1045 len -= m->m_len; in m_split()
1046 if (m == NULL) in m_split()
1048 remain = m->m_len - len; in m_split()
1049 if (m0->m_flags & M_PKTHDR && remain == 0) { in m_split()
1053 n->m_next = m->m_next; in m_split()
1054 m->m_next = NULL; in m_split()
1076 if (m->m_flags & (M_EXT | M_EXTPG)) in m_split()
1079 /* m can't be the lead packet */ in m_split()
1080 M_ALIGN(n, 0); in m_split()
1081 n->m_next = m_split(m, len, wait); in m_split()
1086 n->m_len = 0; in m_split()
1091 } else if (remain == 0) { in m_split()
1092 n = m->m_next; in m_split()
1093 m->m_next = NULL; in m_split()
1096 n = m_get(wait, m->m_type); in m_split()
1102 if (m->m_flags & (M_EXT | M_EXTPG)) { in m_split()
1103 n->m_data = m->m_data + len; in m_split()
1104 mb_dupcl(n, m); in m_split()
1106 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); in m_split()
1109 m->m_len = len; in m_split()
1110 n->m_next = m->m_next; in m_split()
1111 m->m_next = NULL; in m_split()
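m_split() cuts a chain in two after len0 bytes: it walks to the mbuf holding the split point, hands the tail over directly when the cut falls exactly on an mbuf boundary, and otherwise builds a new mbuf for the remaining bytes, sharing the cluster (via mb_dupcl) when the source has external storage and copying otherwise; a packet-header chain also gets its header and pkthdr.len adjusted. A copy-only sketch with made-up names (struct node, chain_split):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {                 /* hypothetical stand-in for struct mbuf */
    struct node *next;
    size_t       len;
    char         data[64];
};

/* Analogue of the copy path in m_split(): split the chain after 'len0'
 * bytes and return the second half (or NULL if len0 is past the end). */
static struct node *
chain_split(struct node *m, size_t len0)
{
    while (m != NULL && len0 > m->len) {
        len0 -= m->len;
        m = m->next;
    }
    if (m == NULL)
        return NULL;
    size_t remain = m->len - len0;
    if (remain == 0) {                /* clean cut on a node boundary */
        struct node *n = m->next;
        m->next = NULL;
        return n;
    }
    struct node *n = calloc(1, sizeof(*n));
    if (n == NULL)
        return NULL;
    memcpy(n->data, m->data + len0, remain);
    n->len = remain;
    m->len = len0;
    n->next = m->next;
    m->next = NULL;
    return n;
}

int
main(void)
{
    struct node b = { NULL, 6, " world" };
    struct node a = { &b,   5, "hello"  };

    struct node *tail = chain_split(&a, 7);   /* cut inside " world" */
    printf("head ends after %zu+%zu bytes, tail starts with \"%.*s\"\n",
        a.len, a.next ? a.next->len : 0, (int)tail->len, tail->data);
    free(tail);
    return 0;
}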
1124 struct mbuf *m, *n; in mc_split() local
1130 mlen = 0; in mc_split()
1132 STAILQ_FOREACH(m, &head->mc_q, m_stailq) { in mc_split()
1134 if (m->m_flags & M_EXT) in mc_split()
1135 mlen += m->m_ext.ext_size; in mc_split()
1136 if (len > m->m_len) in mc_split()
1137 len -= m->m_len; in mc_split()
1141 if (__predict_false(m == NULL)) { in mc_split()
1143 return (0); in mc_split()
1145 remain = m->m_len - len; in mc_split()
1146 if (remain > 0) { in mc_split()
1147 if (__predict_false((n = m_get(wait, m->m_type)) == NULL)) in mc_split()
1150 if (m->m_flags & M_EXT) { in mc_split()
1151 n->m_data = m->m_data + len; in mc_split()
1152 mb_dupcl(n, m); in mc_split()
1154 bcopy(mtod(m, char *) + len, mtod(n, char *), remain); in mc_split()
1158 STAILQ_FIRST(&tail->mc_q) = STAILQ_NEXT(m, m_stailq); in mc_split()
1162 if (remain > 0) { in mc_split()
1163 MPASS(n->m_len == 0); in mc_split()
1166 m->m_len -= remain; in mc_split()
1167 if (m->m_flags & M_EOR) { in mc_split()
1168 m->m_flags &= ~M_EOR; in mc_split()
1172 head->mc_q.stqh_last = &STAILQ_NEXT(m, m_stailq); in mc_split()
1173 STAILQ_NEXT(m, m_stailq) = NULL; in mc_split()
1177 return (0); in mc_split()
1189 struct mbuf *m; in m_devget() local
1193 if (off < 0 || off > MHLEN) in m_devget()
1196 while (totlen > 0) { in m_devget()
1199 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); in m_devget()
1202 m = m_gethdr(M_NOWAIT, MT_DATA); in m_devget()
1206 if (m && totlen + off + max_linkhdr <= MHLEN) { in m_devget()
1207 m->m_data += max_linkhdr; in m_devget()
1211 if (m == NULL) in m_devget()
1213 m->m_pkthdr.rcvif = ifp; in m_devget()
1214 m->m_pkthdr.len = totlen; in m_devget()
1217 m = m_getcl(M_NOWAIT, MT_DATA, 0); in m_devget()
1220 m = m_get(M_NOWAIT, MT_DATA); in m_devget()
1223 if (m == NULL) { in m_devget()
1229 m->m_data += off; in m_devget()
1231 off = 0; in m_devget()
1233 m->m_len = len = min(totlen, len); in m_devget()
1235 copy(buf, mtod(m, caddr_t), (u_int)len); in m_devget()
1237 bcopy(buf, mtod(m, caddr_t), (u_int)len); in m_devget()
1239 *mp = m; in m_devget()
1240 mp = &m->m_next; in m_devget()
1247 m_copytounmapped(const struct mbuf *m, int off, int len, c_caddr_t cp) in m_copytounmapped() argument
1253 KASSERT(off >= 0, ("m_copytounmapped: negative off %d", off)); in m_copytounmapped()
1254 KASSERT(len >= 0, ("m_copytounmapped: negative len %d", len)); in m_copytounmapped()
1255 KASSERT(off < m->m_len, ("m_copytounmapped: len exceeds mbuf length")); in m_copytounmapped()
1262 uio.uio_offset = 0; in m_copytounmapped()
1264 error = m_unmapped_uiomove(m, off, &uio, len); in m_copytounmapped()
1265 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off, in m_copytounmapped()
1278 struct mbuf *m = m0, *n; in m_copyback() local
1279 int totlen = 0; in m_copyback()
1283 while (off > (mlen = m->m_len)) { in m_copyback()
1286 if (m->m_next == NULL) { in m_copyback()
1287 n = m_get(M_NOWAIT, m->m_type); in m_copyback()
1292 m->m_next = n; in m_copyback()
1294 m = m->m_next; in m_copyback()
1296 while (len > 0) { in m_copyback()
1297 if (m->m_next == NULL && (len > m->m_len - off)) { in m_copyback()
1298 m->m_len += min(len - (m->m_len - off), in m_copyback()
1299 M_TRAILINGSPACE(m)); in m_copyback()
1301 mlen = min (m->m_len - off, len); in m_copyback()
1302 if ((m->m_flags & M_EXTPG) != 0) in m_copyback()
1303 m_copytounmapped(m, off, mlen, cp); in m_copyback()
1305 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen); in m_copyback()
1309 off = 0; in m_copyback()
1311 if (len == 0) in m_copyback()
1313 if (m->m_next == NULL) { in m_copyback()
1314 n = m_get(M_NOWAIT, m->m_type); in m_copyback()
1318 m->m_next = n; in m_copyback()
1320 m = m->m_next; in m_copyback()
1322 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) in m_copyback()
1323 m->m_pkthdr.len = totlen; in m_copyback()
1331 * Return 1 if able to complete the job; otherwise 0.
1336 struct mbuf *m, *n; in m_append() local
1339 for (m = m0; m->m_next != NULL; m = m->m_next) in m_append()
1342 space = M_TRAILINGSPACE(m); in m_append()
1343 if (space > 0) { in m_append()
1349 bcopy(cp, mtod(m, caddr_t) + m->m_len, space); in m_append()
1350 m->m_len += space; in m_append()
1353 while (remainder > 0) { in m_append()
1358 n = m_get(M_NOWAIT, m->m_type); in m_append()
1364 m->m_next = n; in m_append()
1365 m = n; in m_append()
1369 return (remainder == 0); in m_append()
1373 m_apply_extpg_one(struct mbuf *m, int off, int len, in m_apply_extpg_one() argument
1382 off += mtod(m, vm_offset_t); in m_apply_extpg_one()
1383 if (off < m->m_epg_hdrlen) { in m_apply_extpg_one()
1384 count = min(m->m_epg_hdrlen - off, len); in m_apply_extpg_one()
1385 rval = f(arg, m->m_epg_hdr + off, count); in m_apply_extpg_one()
1389 off = 0; in m_apply_extpg_one()
1391 off -= m->m_epg_hdrlen; in m_apply_extpg_one()
1392 pgoff = m->m_epg_1st_off; in m_apply_extpg_one()
1393 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { in m_apply_extpg_one()
1394 pglen = m_epg_pagelen(m, i, pgoff); in m_apply_extpg_one()
1397 p = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff + off); in m_apply_extpg_one()
1402 off = 0; in m_apply_extpg_one()
1405 pgoff = 0; in m_apply_extpg_one()
1407 if (len > 0) { in m_apply_extpg_one()
1408 KASSERT(off < m->m_epg_trllen, in m_apply_extpg_one()
1410 KASSERT(len <= m->m_epg_trllen - off, in m_apply_extpg_one()
1412 return (f(arg, m->m_epg_trail + off, len)); in m_apply_extpg_one()
1414 return (0); in m_apply_extpg_one()
1419 m_apply_one(struct mbuf *m, int off, int len, in m_apply_one() argument
1422 if ((m->m_flags & M_EXTPG) != 0) in m_apply_one()
1423 return (m_apply_extpg_one(m, off, len, f, arg)); in m_apply_one()
1425 return (f(arg, mtod(m, caddr_t) + off, len)); in m_apply_one()
1433 m_apply(struct mbuf *m, int off, int len, in m_apply() argument
1439 KASSERT(off >= 0, ("m_apply, negative off %d", off)); in m_apply()
1440 KASSERT(len >= 0, ("m_apply, negative len %d", len)); in m_apply()
1441 while (off > 0) { in m_apply()
1442 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain " in m_apply()
1444 if (off < m->m_len) in m_apply()
1446 off -= m->m_len; in m_apply()
1447 m = m->m_next; in m_apply()
1449 while (len > 0) { in m_apply()
1450 KASSERT(m != NULL, ("m_apply, length > size of mbuf chain " in m_apply()
1452 count = min(m->m_len - off, len); in m_apply()
1453 rval = m_apply_one(m, off, count, f, arg); in m_apply()
1457 off = 0; in m_apply()
1458 m = m->m_next; in m_apply()
1460 return (0); in m_apply()
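m_apply() runs a caller-supplied function f(arg, data, len) over each piece of a byte range of the chain, so checksums, digests, or comparisons can be computed without linearizing the data first; unmapped mbufs are handled piecewise by m_apply_extpg_one() above. A small sketch with a byte-sum callback standing in for a real checksum (struct node, chain_apply, sum_bytes are invented names):

#include <assert.h>
#include <stdio.h>

struct node {                 /* hypothetical stand-in for struct mbuf */
    struct node *next;
    size_t       len;
    const char  *data;
};

/* Analogue of m_apply(): call f() on each piece of [off, off+len). */
static int
chain_apply(const struct node *m, size_t off, size_t len,
    int (*f)(void *arg, const char *p, size_t n), void *arg)
{
    while (off > 0) {
        assert(m != NULL);
        if (off < m->len)
            break;
        off -= m->len;
        m = m->next;
    }
    while (len > 0) {
        assert(m != NULL);
        size_t count = m->len - off;
        if (count > len)
            count = len;
        int rval = f(arg, m->data + off, count);
        if (rval)
            return rval;
        len -= count;
        off = 0;
        m = m->next;
    }
    return 0;
}

/* Example callback: byte sum, a stand-in for a real checksum routine. */
static int
sum_bytes(void *arg, const char *p, size_t n)
{
    unsigned *sum = arg;
    while (n-- > 0)
        *sum += (unsigned char)*p++;
    return 0;
}

int
main(void)
{
    struct node b = { NULL, 3, "bcd" };
    struct node a = { &b,   3, "abc" };
    unsigned sum = 0;

    chain_apply(&a, 1, 4, sum_bytes, &sum);   /* covers "bcbc" */
    printf("sum = %u\n", sum);
    return 0;
}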
1467 m_getptr(struct mbuf *m, int loc, int *off) in m_getptr() argument
1470 while (loc >= 0) { in m_getptr()
1472 if (m->m_len > loc) { in m_getptr()
1474 return (m); in m_getptr()
1476 loc -= m->m_len; in m_getptr()
1477 if (m->m_next == NULL) { in m_getptr()
1478 if (loc == 0) { in m_getptr()
1480 *off = m->m_len; in m_getptr()
1481 return (m); in m_getptr()
1485 m = m->m_next; in m_getptr()
1492 m_print(const struct mbuf *m, int maxlen) in m_print() argument
1498 if (m == NULL) { in m_print()
1499 printf("mbuf: %p\n", m); in m_print()
1503 if (m->m_flags & M_PKTHDR) in m_print()
1504 len = m->m_pkthdr.len; in m_print()
1507 m2 = m; in m_print()
1522 if (len > 0) in m_print()
1540 struct mbuf *m; in m_length() local
1543 len = 0; in m_length()
1544 for (m = m0; m != NULL; m = m->m_next) { in m_length()
1545 len += m->m_len; in m_length()
1546 if (m->m_next == NULL) in m_length()
1550 *last = m; in m_length()
1569 int progress = 0, length; in m_defrag()
1579 int temp = arc4random() & 0xff; in m_defrag()
1580 if (temp == 0xba) in m_defrag()
1593 if (m_dup_pkthdr(m_final, m0, how) == 0) in m_defrag()
1605 m_new = m_getcl(how, MT_DATA, 0); in m_defrag()
1648 frags_per_mbuf(struct mbuf *m) in frags_per_mbuf() argument
1652 if ((m->m_flags & M_EXTPG) == 0) in frags_per_mbuf()
1662 frags = 0; in frags_per_mbuf()
1663 if (m->m_epg_hdrlen != 0) in frags_per_mbuf()
1665 frags += m->m_epg_npgs; in frags_per_mbuf()
1666 if (m->m_epg_trllen != 0) in frags_per_mbuf()
1684 struct mbuf *m, *n, *n2, **prev; in m_collapse() local
1690 curfrags = 0; in m_collapse()
1691 for (m = m0; m != NULL; m = m->m_next) in m_collapse()
1692 curfrags += frags_per_mbuf(m); in m_collapse()
1699 m = m0; in m_collapse()
1702 n = m->m_next; in m_collapse()
1705 if (M_WRITABLE(m) && in m_collapse()
1706 n->m_len < M_TRAILINGSPACE(m)) { in m_collapse()
1707 m_copydata(n, 0, n->m_len, in m_collapse()
1708 mtod(m, char *) + m->m_len); in m_collapse()
1709 m->m_len += n->m_len; in m_collapse()
1710 m->m_next = n->m_next; in m_collapse()
1716 m = n; in m_collapse()
1727 m = m_getcl(how, MT_DATA, 0); in m_collapse()
1728 if (m == NULL) in m_collapse()
1730 m_copydata(n, 0, n->m_len, mtod(m, char *)); in m_collapse()
1731 m_copydata(n2, 0, n2->m_len, in m_collapse()
1732 mtod(m, char *) + n->m_len); in m_collapse()
1733 m->m_len = n->m_len + n2->m_len; in m_collapse()
1734 m->m_next = n2->m_next; in m_collapse()
1735 *prev = m; in m_collapse()
1773 * 0 no fragmentation will occur
1774 * > 0 each fragment will be of the specified length
1783 int divisor = 255, progress = 0, fraglen; in m_fragment()
1788 if (length == 0 || length < -2) in m_fragment()
1792 if (length < 0 && divisor > MCLBYTES) in m_fragment()
1796 if (length > 0) in m_fragment()
1805 if (m_dup_pkthdr(m_first, m0, how) == 0) in m_fragment()
1816 if (progress != 0) { in m_fragment()
1817 struct mbuf *m_new = m_getcl(how, MT_DATA, 0); in m_fragment()
1847 mb_free_mext_pgs(struct mbuf *m) in mb_free_mext_pgs() argument
1851 M_ASSERTEXTPG(m); in mb_free_mext_pgs()
1852 for (int i = 0; i < m->m_epg_npgs; i++) { in mb_free_mext_pgs()
1853 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]); in mb_free_mext_pgs()
1862 struct mbuf *m, *mb, *prev; in m_uiotombuf_nomap() local
1868 MPASS((flags & M_PKTHDR) == 0); in m_uiotombuf_nomap()
1869 MPASS((how & M_ZERO) == 0); in m_uiotombuf_nomap()
1875 if (len > 0) in m_uiotombuf_nomap()
1880 if (maxseg == 0) in m_uiotombuf_nomap()
1889 if (__predict_false(total == 0)) { in m_uiotombuf_nomap()
1890 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs, 0); in m_uiotombuf_nomap()
1900 m = NULL; in m_uiotombuf_nomap()
1901 while (total > 0) { in m_uiotombuf_nomap()
1902 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs, 0); in m_uiotombuf_nomap()
1905 if (m == NULL) in m_uiotombuf_nomap()
1906 m = mb; in m_uiotombuf_nomap()
1912 for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) { in m_uiotombuf_nomap()
1929 error = uiomove_fromphys(pg_array, 0, length, uio); in m_uiotombuf_nomap()
1930 if (error != 0) in m_uiotombuf_nomap()
1935 m->m_pkthdr.len += length; in m_uiotombuf_nomap()
1937 return (m); in m_uiotombuf_nomap()
1940 m_freem(m); in m_uiotombuf_nomap()
1956 } else if (__predict_false(uio->uio_resid == 0)) { in m_uiotombuf()
1957 struct mbuf *m; in m_uiotombuf() local
1964 m = m_gethdr(how, MT_DATA); in m_uiotombuf()
1965 m->m_pkthdr.memlen = MSIZE; in m_uiotombuf()
1967 m = m_get(how, MT_DATA); in m_uiotombuf()
1968 if (m != NULL) in m_uiotombuf()
1969 m->m_data += lspace; in m_uiotombuf()
1970 return (m); in m_uiotombuf()
1976 if (__predict_true(error == 0)) { in m_uiotombuf()
1990 * @param length Limit copyout length. If 0 entire uio_resid is copied.
2004 MPASS(uio->uio_resid >= 0); in mc_uiotomc()
2006 if (length > 0) { in mc_uiotomc()
2017 if (__predict_false(total + lspace == 0)) { in mc_uiotomc()
2019 return (0); in mc_uiotomc()
2042 return (0); in mc_uiotomc()
2049 m_unmapped_uiomove(const struct mbuf *m, int m_off, struct uio *uio, int len) in m_unmapped_uiomove() argument
2054 M_ASSERTEXTPG(m); in m_unmapped_uiomove()
2055 error = 0; in m_unmapped_uiomove()
2058 off = mtod(m, vm_offset_t); in m_unmapped_uiomove()
2061 if (m->m_epg_hdrlen != 0) { in m_unmapped_uiomove()
2062 if (off >= m->m_epg_hdrlen) { in m_unmapped_uiomove()
2063 off -= m->m_epg_hdrlen; in m_unmapped_uiomove()
2065 seglen = m->m_epg_hdrlen - off; in m_unmapped_uiomove()
2068 off = 0; in m_unmapped_uiomove()
2071 &m->m_epg_hdr[segoff]), seglen, uio); in m_unmapped_uiomove()
2074 pgoff = m->m_epg_1st_off; in m_unmapped_uiomove()
2075 for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) { in m_unmapped_uiomove()
2076 pglen = m_epg_pagelen(m, i, pgoff); in m_unmapped_uiomove()
2079 pgoff = 0; in m_unmapped_uiomove()
2084 off = 0; in m_unmapped_uiomove()
2087 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]); in m_unmapped_uiomove()
2089 pgoff = 0; in m_unmapped_uiomove()
2091 if (len != 0 && error == 0) { in m_unmapped_uiomove()
2092 KASSERT((off + len) <= m->m_epg_trllen, in m_unmapped_uiomove()
2094 m->m_epg_trllen, m_off)); in m_unmapped_uiomove()
2095 error = uiomove(__DECONST(void *, &m->m_epg_trail[off]), in m_unmapped_uiomove()
2105 m_mbuftouio(struct uio *uio, const struct mbuf *m, int len) in m_mbuftouio() argument
2108 int progress = 0; in m_mbuftouio()
2110 if (len > 0) in m_mbuftouio()
2116 for (; m != NULL; m = m->m_next) { in m_mbuftouio()
2117 length = min(m->m_len, total - progress); in m_mbuftouio()
2119 if ((m->m_flags & M_EXTPG) != 0) in m_mbuftouio()
2120 error = m_unmapped_uiomove(m, 0, uio, length); in m_mbuftouio()
2122 error = uiomove(mtod(m, void *), length, uio); in m_mbuftouio()
2129 return (0); in m_mbuftouio()
2144 struct mbuf *m, *mprev; in m_unshare() local
2149 for (m = m0; m != NULL; m = mprev->m_next) { in m_unshare()
2162 if ((m->m_flags & M_EXT) == 0) { in m_unshare()
2164 m->m_len <= M_TRAILINGSPACE(mprev)) { in m_unshare()
2167 mtod(m, caddr_t), m->m_len); in m_unshare()
2168 mprev->m_len += m->m_len; in m_unshare()
2169 mprev->m_next = m->m_next; /* unlink from chain */ in m_unshare()
2170 m_free(m); /* reclaim mbuf */ in m_unshare()
2172 mprev = m; in m_unshare()
2179 if (M_WRITABLE(m)) { in m_unshare()
2180 mprev = m; in m_unshare()
2190 KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags)); in m_unshare()
2193 m->m_len <= M_TRAILINGSPACE(mprev)) { in m_unshare()
2196 mtod(m, caddr_t), m->m_len); in m_unshare()
2197 mprev->m_len += m->m_len; in m_unshare()
2198 mprev->m_next = m->m_next; /* unlink from chain */ in m_unshare()
2199 m_free(m); /* reclaim mbuf */ in m_unshare()
2211 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS); in m_unshare()
2216 if (m->m_flags & M_PKTHDR) { in m_unshare()
2217 KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR", in m_unshare()
2218 __func__, m0, m)); in m_unshare()
2219 m_move_pkthdr(n, m); in m_unshare()
2221 len = m->m_len; in m_unshare()
2222 off = 0; in m_unshare()
2227 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc); in m_unshare()
2232 #if 0 in m_unshare()
2237 if (len <= 0) in m_unshare()
2241 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS); in m_unshare()
2248 n->m_next = m->m_next; in m_unshare()
2253 m_free(m); /* release old mbuf */ in m_unshare()
2269 m_profile(struct mbuf *m) in m_profile() argument
2271 int segments = 0; in m_profile()
2272 int used = 0; in m_profile()
2273 int wasted = 0; in m_profile()
2275 while (m) { in m_profile()
2277 used += m->m_len; in m_profile()
2278 if (m->m_flags & M_EXT) { in m_profile()
2279 wasted += MHLEN - sizeof(m->m_ext) + in m_profile()
2280 m->m_ext.ext_size - m->m_len; in m_profile()
2282 if (m->m_flags & M_PKTHDR) in m_profile()
2283 wasted += MHLEN - m->m_len; in m_profile()
2285 wasted += MLEN - m->m_len; in m_profile()
2287 m = m->m_next; in m_profile()
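m_profile() feeds the mbprof histograms below: for each chain it counts segments, adds up the bytes actually used, and estimates wasted space, where a cluster-backed mbuf wastes its unused internal area plus the unused part of the cluster, and a plain mbuf wastes MHLEN or MLEN minus m_len. A simplified version that assumes a single fixed allocation size per segment (SEG_SIZE, struct node, chain_profile are invented):

#include <stdio.h>

#define SEG_SIZE 256          /* hypothetical fixed allocation per node */

struct node {
    struct node *next;
    size_t       len;         /* bytes used out of SEG_SIZE */
};

struct profile {
    unsigned segments;
    size_t   used;
    size_t   wasted;
};

/* Analogue of m_profile(): tally segment count, used and wasted bytes. */
static void
chain_profile(const struct node *m, struct profile *p)
{
    for (; m != NULL; m = m->next) {
        p->segments++;
        p->used += m->len;
        p->wasted += SEG_SIZE - m->len;
    }
}

int
main(void)
{
    struct node c = { NULL, 40 };
    struct node b = { &c,  256 };
    struct node a = { &b,  100 };
    struct profile p = { 0, 0, 0 };

    chain_profile(&a, &p);
    printf("segments=%u used=%zu wasted=%zu\n", p.segments, p.used, p.wasted);
    return 0;
}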
2313 p = &mbprof.wasted[0]; in mbprof_handler()
2318 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], in mbprof_handler()
2325 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], in mbprof_handler()
2328 p = &mbprof.used[0]; in mbprof_handler()
2333 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], in mbprof_handler()
2340 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], in mbprof_handler()
2343 p = &mbprof.segments[0]; in mbprof_handler()
2348 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], in mbprof_handler()
2355 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], in mbprof_handler()
2369 clear = 0; in mbprof_clr_handler()
2370 error = sysctl_handle_int(oidp, &clear, 0, req); in mbprof_clr_handler()
2382 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
2387 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,