/* SPDX-License-Identifier: GPL-2.0 */
/* xfrm_trace_iptfs.h
 *
 * August 12 2023, Christian Hopps <chopps@labn.net>
 *
 * Copyright (c) 2023, LabN Consulting, L.L.C.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM iptfs

#if !defined(_TRACE_IPTFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IPTFS_H

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>
#include <net/ip.h>

struct xfrm_iptfs_data;
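
/*
 * Helpers used by the trace events below to report the inner packet's
 * transport protocol and a flow sequence-like value.  Their definitions are
 * not visible in this listing, so the bodies here are a minimal sketch
 * reconstructed from the call sites (__trace_ip_proto()/__trace_ip_proto_seq()
 * on the inner IPv4/IPv6 header); the upstream implementation may differ.
 * They get their own include-once guard (a name chosen for this sketch)
 * because the body of this header is re-read when TRACE_HEADER_MULTI_READ
 * is defined.
 */
#ifndef _TRACE_IPTFS_HELPERS_ONCE
#define _TRACE_IPTFS_HELPERS_ONCE

#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>

static inline u8 __trace_ip_proto(struct iphdr *iph)
{
	/* The pointer may reference an IPv4 or IPv6 header; check the version. */
	if (iph->version == 4)
		return iph->protocol;
	return ((struct ipv6hdr *)iph)->nexthdr;
}

static inline u32 __trace_ip_proto_seq(struct iphdr *iph)
{
	void *nexthdr;
	u32 protocol = 0;

	if (iph->version == 4) {
		nexthdr = (void *)(iph + 1);
		protocol = iph->protocol;
	} else if (iph->version == 6) {
		/* IPv6 extension headers are ignored in this sketch. */
		nexthdr = (void *)((struct ipv6hdr *)iph + 1);
		protocol = ((struct ipv6hdr *)iph)->nexthdr;
	}

	/* Return something sequence-like for the common transports. */
	switch (protocol) {
	case IPPROTO_ICMP:
		return ntohs(((struct icmphdr *)nexthdr)->un.echo.sequence);
	case IPPROTO_ICMPV6:
		return ntohs(((struct icmp6hdr *)nexthdr)->icmp6_sequence);
	case IPPROTO_TCP:
		return ntohl(((struct tcphdr *)nexthdr)->seq);
	case IPPROTO_UDP:
		return ntohs(((struct udphdr *)nexthdr)->source);
	default:
		return 0;
	}
}

#endif /* _TRACE_IPTFS_HELPERS_ONCE */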

TRACE_EVENT(iptfs_egress_recv,
	    TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u16 blkoff),
	    TP_ARGS(skb, xtfs, blkoff),
	    TP_STRUCT__entry(__field(struct sk_buff *, skb)
			     __field(void *, head)
			     __field(void *, head_pg_addr)
			     __field(void *, pg0addr)
			     __field(u32, skb_len)
			     __field(u32, data_len)
			     __field(u32, headroom)
			     __field(u32, tailroom)
			     __field(u32, tail)
			     __field(u32, end)
			     __field(u32, pg0off)
			     __field(u8, head_frag)
			     __field(u8, frag_list)
			     __field(u8, nr_frags)
			     __field(u16, blkoff)),
	    TP_fast_assign(__entry->skb = skb;
			   __entry->head = skb->head;
			   __entry->skb_len = skb->len;
			   __entry->data_len = skb->data_len;
			   __entry->headroom = skb_headroom(skb);
			   __entry->tailroom = skb_tailroom(skb);
			   __entry->tail = (u32)skb->tail;
			   __entry->end = (u32)skb->end;
			   __entry->head_frag = skb->head_frag;
			   __entry->frag_list = (bool)skb_shinfo(skb)->frag_list;
			   __entry->nr_frags = skb_shinfo(skb)->nr_frags;
			   __entry->blkoff = blkoff;
			   __entry->head_pg_addr = page_address(virt_to_head_page(skb->head));
			   __entry->pg0addr = (__entry->nr_frags
					       ? page_address(netmem_to_page(skb_shinfo(skb)->frags[0].netmem))
					       : NULL);
			   __entry->pg0off = (__entry->nr_frags
					      ? skb_shinfo(skb)->frags[0].offset
					      : 0);
		),
	    TP_printk("EGRESS: skb=%p len=%u data_len=%u headroom=%u head_frag=%u frag_list=%u nr_frags=%u blkoff=%u\n\t\ttailroom=%u tail=%u end=%u head=%p hdpgaddr=%p pg0->addr=%p pg0->data=%p pg0->off=%u",
		      __entry->skb, __entry->skb_len, __entry->data_len, __entry->headroom,
		      __entry->head_frag, __entry->frag_list, __entry->nr_frags, __entry->blkoff,
		      __entry->tailroom, __entry->tail, __entry->end, __entry->head,
		      __entry->head_pg_addr, __entry->pg0addr, __entry->pg0addr + __entry->pg0off,
		      __entry->pg0off)
	)

DECLARE_EVENT_CLASS(iptfs_ingress_preq_event,
		    TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs,
			     u32 pmtu, u8 was_gso),
		    TP_ARGS(skb, xtfs, pmtu, was_gso),
		    TP_STRUCT__entry(__field(struct sk_buff *, skb)
				     __field(u32, skb_len)
				     __field(u32, data_len)
				     __field(u32, pmtu)
				     __field(u32, queue_size)
				     __field(u32, proto_seq)
				     __field(u8, proto)
				     __field(u8, was_gso)
			),
		    TP_fast_assign(__entry->skb = skb;
				   __entry->skb_len = skb->len;
				   __entry->data_len = skb->data_len;
				   __entry->queue_size =
					xtfs->cfg.max_queue_size - xtfs->queue_size;
				   __entry->proto = __trace_ip_proto(ip_hdr(skb));
				   __entry->proto_seq = __trace_ip_proto_seq(ip_hdr(skb));
				   __entry->pmtu = pmtu;
				   __entry->was_gso = was_gso;
			),
		    TP_printk("INGRPREQ: skb=%p len=%u data_len=%u qsize=%u proto=%u proto_seq=%u pmtu=%u was_gso=%u",
			      __entry->skb, __entry->skb_len, __entry->data_len,
			      __entry->queue_size, __entry->proto, __entry->proto_seq,
			      __entry->pmtu, __entry->was_gso));

DEFINE_EVENT(iptfs_ingress_preq_event, iptfs_enqueue,
	     TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u32 pmtu, u8 was_gso),
	     TP_ARGS(skb, xtfs, pmtu, was_gso));

DEFINE_EVENT(iptfs_ingress_preq_event, iptfs_no_queue_space,
	     TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u32 pmtu, u8 was_gso),
	     TP_ARGS(skb, xtfs, pmtu, was_gso));

DEFINE_EVENT(iptfs_ingress_preq_event, iptfs_too_big,
	     TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u32 pmtu, u8 was_gso),
	     TP_ARGS(skb, xtfs, pmtu, was_gso));

DECLARE_EVENT_CLASS(iptfs_ingress_postq_event,
		    TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff, struct iphdr *iph),
		    TP_ARGS(skb, mtu, blkoff, iph),
		    TP_STRUCT__entry(__field(struct sk_buff *, skb)
				     __field(u32, skb_len)
				     __field(u32, data_len)
				     __field(u32, mtu)
				     __field(u32, proto_seq)
				     __field(u16, blkoff)
				     __field(u8, proto)),
		    TP_fast_assign(__entry->skb = skb;
				   __entry->skb_len = skb->len;
				   __entry->data_len = skb->data_len;
				   __entry->mtu = mtu;
				   __entry->blkoff = blkoff;
				   __entry->proto = iph ? __trace_ip_proto(iph) : 0;
				   __entry->proto_seq = iph ? __trace_ip_proto_seq(iph) : 0;
			),
		    TP_printk("INGRPSTQ: skb=%p len=%u data_len=%u mtu=%u blkoff=%u proto=%u proto_seq=%u",
			      __entry->skb, __entry->skb_len, __entry->data_len, __entry->mtu,
			      __entry->blkoff, __entry->proto, __entry->proto_seq));

DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_dequeue,
	     TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
		      struct iphdr *iph),
	     TP_ARGS(skb, mtu, blkoff, iph));

DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_fragmenting,
	     TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
		      struct iphdr *iph),
	     TP_ARGS(skb, mtu, blkoff, iph));

DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_final_fragment,
	     TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
		      struct iphdr *iph),
	     TP_ARGS(skb, mtu, blkoff, iph));

DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_toobig,
	     TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
		      struct iphdr *iph),
	     TP_ARGS(skb, mtu, blkoff, iph));

TRACE_EVENT(iptfs_ingress_nth_peek,
	    TP_PROTO(struct sk_buff *skb, u32 remaining),
	    TP_ARGS(skb, remaining),
	    TP_STRUCT__entry(__field(struct sk_buff *, skb)
			     __field(u32, skb_len)
			     __field(u32, remaining)),
	    TP_fast_assign(__entry->skb = skb;
			   __entry->skb_len = skb->len;
			   __entry->remaining = remaining;
		),
	    TP_printk("INGRPSTQ: NTHPEEK: skb=%p len=%u remaining=%u",
		      __entry->skb, __entry->skb_len, __entry->remaining));

TRACE_EVENT(iptfs_ingress_nth_add, TP_PROTO(struct sk_buff *skb, u8 share_ok),
	    TP_ARGS(skb, share_ok),
	    TP_STRUCT__entry(__field(struct sk_buff *, skb)
			     __field(u32, skb_len)
			     __field(u32, data_len)
			     __field(u8, share_ok)
			     __field(u8, head_frag)
			     __field(u8, pp_recycle)
			     __field(u8, cloned)
			     __field(u8, shared)
			     __field(u8, nr_frags)
			     __field(u8, frag_list)
		),
	    TP_fast_assign(__entry->skb = skb;
			   __entry->skb_len = skb->len;
			   __entry->data_len = skb->data_len;
			   __entry->share_ok = share_ok;
			   __entry->head_frag = skb->head_frag;
			   __entry->pp_recycle = skb->pp_recycle;
			   __entry->cloned = skb_cloned(skb);
			   __entry->shared = skb_shared(skb);
			   __entry->nr_frags = skb_shinfo(skb)->nr_frags;
			   __entry->frag_list = (bool)skb_shinfo(skb)->frag_list;
		),
	    TP_printk("INGRPSTQ: NTHADD: skb=%p len=%u data_len=%u share_ok=%u head_frag=%u pp_recycle=%u cloned=%u shared=%u nr_frags=%u frag_list=%u",
		      __entry->skb, __entry->skb_len, __entry->data_len, __entry->share_ok,
		      __entry->head_frag, __entry->pp_recycle, __entry->cloned, __entry->shared,
		      __entry->nr_frags, __entry->frag_list));

DECLARE_EVENT_CLASS(iptfs_timer_event,
		    TP_PROTO(struct xfrm_iptfs_data *xtfs, u64 time_val),
		    TP_ARGS(xtfs, time_val),
		    TP_STRUCT__entry(__field(u64, time_val)
				     __field(u64, set_time)),
		    TP_fast_assign(__entry->time_val = time_val;
				   __entry->set_time = xtfs->iptfs_settime;
			),
		    TP_printk("TIMER: set_time=%llu time_val=%llu",
			      __entry->set_time, __entry->time_val));

DEFINE_EVENT(iptfs_timer_event, iptfs_timer_start,
	     TP_PROTO(struct xfrm_iptfs_data *xtfs, u64 time_val),
	     TP_ARGS(xtfs, time_val));

DEFINE_EVENT(iptfs_timer_event, iptfs_timer_expire,
	     TP_PROTO(struct xfrm_iptfs_data *xtfs, u64 time_val),
	     TP_ARGS(xtfs, time_val));

#endif /* _TRACE_IPTFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../net/xfrm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_iptfs
#include <trace/define_trace.h>
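
/*
 * Usage sketch (an assumption about the surrounding build, not part of this
 * header): one compilation unit in net/xfrm is expected to instantiate the
 * tracepoints declared above with the standard kernel pattern
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace_iptfs.h"
 *
 * after which callers emit events through the generated trace_<name>()
 * helpers, e.g. trace_iptfs_timer_start(xtfs, time_val) or
 * trace_iptfs_egress_recv(skb, xtfs, blkoff).
 */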