/linux/tools/testing/selftests/ring-buffer/map_test.c
    217  unsigned long meta_len, data_len;           in TEST_F()  [local]
    220  meta_len = desc->meta->meta_page_size;      in TEST_F()
    225  desc->cpu_fd, meta_len);                    in TEST_F()
    232  desc->cpu_fd, meta_len);                    in TEST_F()
    237  meta_len += desc->meta->subbuf_size * 2;    in TEST_F()
    239  desc->cpu_fd, meta_len);                    in TEST_F()
|
/linux/tools/testing/selftests/bpf/progs/type_cast.c
    21   unsigned int meta_len, frag0_len, kskb_len, kskb2_len;    [variable]
    50   meta_len = shared_info->meta_len;                         in md_skb()
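
The selftest reads skb_shared_info::meta_len from a tc program by casting the program context to the kernel's struct sk_buff. Below is a minimal sketch of that pattern, assuming vmlinux.h supplies the kernel types and the bpf_cast_to_kern_ctx()/bpf_rdonly_cast() kfuncs are available as __ksym externs; the global meta_len mirrors the variable at line 21, and this is an illustration rather than a verbatim copy of the selftest.

    /* Sketch: read skb_shared_info::meta_len from a tc program via kfunc casts. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    unsigned int meta_len;   /* read back from user space through the skeleton */

    extern void *bpf_cast_to_kern_ctx(void *obj) __ksym;
    extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym;

    SEC("tc")
    int md_skb(struct __sk_buff *skb)
    {
        struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);
        struct skb_shared_info *shinfo;

        /* skb_shinfo(skb): the shared info sits at skb->head + skb->end */
        shinfo = bpf_rdonly_cast(kskb->head + kskb->end,
                                 bpf_core_type_id_kernel(struct skb_shared_info));
        meta_len = shinfo->meta_len;
        return 0;
    }

    char _license[] SEC("license") = "GPL";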
|
/linux/drivers/gpio/gpio-sloppy-logic-analyzer.c
    230  unsigned int i, meta_len = 0;                                          in gpio_la_poll_probe()  [local]
    274  new_meta = devm_krealloc(dev, meta, meta_len + add_len, GFP_KERNEL);   in gpio_la_poll_probe()
    279  meta_len += snprintf(meta + meta_len, add_len, "probe%02u=%s\n",       in gpio_la_poll_probe()
    287  priv->meta.size = meta_len;                                            in gpio_la_poll_probe()
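
The driver assembles a text blob of "probeNN=name" lines by growing the buffer once per probe, appending with snprintf(), and recording the final size. A user-space sketch of the same grow-and-append pattern, with realloc() standing in for devm_krealloc() and illustrative probe names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Append "probeNN=<name>\n" for each probe, growing the buffer as we go. */
    static char *build_meta(const char *const *names, unsigned int n, size_t *meta_len)
    {
        char *meta = NULL;
        size_t len = 0;

        for (unsigned int i = 0; i < n; i++) {
            /* room for "probe%02u=", the name, '\n' and the trailing NUL */
            size_t add_len = strlen(names[i]) + 11;
            char *new_meta = realloc(meta, len + add_len);

            if (!new_meta) {
                free(meta);
                return NULL;
            }
            meta = new_meta;
            len += snprintf(meta + len, add_len, "probe%02u=%s\n", i, names[i]);
        }
        *meta_len = len;   /* size of the blob, excluding the final NUL */
        return meta;
    }

    int main(void)
    {
        const char *names[] = { "sda", "scl" };
        size_t meta_len;
        char *meta = build_meta(names, 2, &meta_len);

        if (meta) {
            fwrite(meta, 1, meta_len, stdout);
            free(meta);
        }
        return 0;
    }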
|
/linux/drivers/nvme/host/ioctl.c
    116  unsigned bufflen, void __user *meta_buffer, unsigned meta_len,   in nvme_map_user_request()  [argument]
    124  bool has_metadata = meta_buffer && meta_len;                     in nvme_map_user_request()
    149  ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);     in nvme_map_user_request()
    164  void __user *meta_buffer, unsigned meta_len,                     in nvme_submit_user_cmd()  [argument]
    181  meta_len, NULL, flags);                                          in nvme_submit_user_cmd()
    210  unsigned length, meta_len;                                       in nvme_submit_io()  [local]
    237  meta_len = 0;                                                    in nvme_submit_io()
    240  meta_len = (io.nblocks + 1) * ns->head->ms;                      in nvme_submit_io()
    245  length += meta_len;                                              in nvme_submit_io()
    246  meta_len = 0;                                                    in nvme_submit_io()
    [all …]
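
In nvme_submit_io(), the separate metadata buffer for a passthrough read/write is sized as (nblocks + 1) times the per-block metadata bytes, and meta_len is folded back to zero when the namespace interleaves metadata with the data (extended LBA). A user-space sketch of issuing such a request through the legacy NVME_IOCTL_SUBMIT_IO interface; the device path, LBA size and 8-byte metadata size are assumptions, and a real program would query the namespace format first:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nvme_ioctl.h>

    int main(void)
    {
        const unsigned int lba_size = 4096, ms = 8;    /* assumed namespace format */
        const unsigned int nblocks = 7;                /* zero-based: 8 blocks */
        size_t data_len = (nblocks + 1) * (size_t)lba_size;
        size_t meta_len = (nblocks + 1) * (size_t)ms;  /* as in nvme_submit_io() */
        void *data = malloc(data_len), *meta = malloc(meta_len);
        struct nvme_user_io io;
        int fd, ret;

        fd = open("/dev/nvme0n1", O_RDONLY);           /* assumed device node */
        if (fd < 0 || !data || !meta)
            return 1;

        memset(&io, 0, sizeof(io));
        io.opcode = 0x02;                     /* NVMe read */
        io.nblocks = nblocks;
        io.slba = 0;
        io.addr = (uint64_t)(uintptr_t)data;
        io.metadata = (uint64_t)(uintptr_t)meta;

        ret = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
        printf("submit_io: %d (meta_len=%zu)\n", ret, meta_len);

        free(data);
        free(meta);
        close(fd);
        return ret ? 1 : 0;
    }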
|
/linux/Documentation/trace/ring-buffer-map.rst
    68   unsigned long meta_len, data_len;
    81   meta_len = meta->meta_page_size;
    89   data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd, meta_len);
    102  munmap(meta, meta_len);
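
The documentation excerpt is from the user-space example for the memory-mapped ring buffer: the meta page is mapped first, and its meta_page_size becomes both the length to unmap later and the file offset at which the sub-buffers are mapped. A condensed sketch of that flow, assuming struct trace_buffer_meta from <linux/trace_mmap.h> and the per-CPU trace_pipe_raw file described in the same document:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/trace_mmap.h>

    int main(void)
    {
        unsigned long meta_len, data_len;
        struct trace_buffer_meta *meta;
        void *data;
        int page_size = getpagesize();
        int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
                      O_RDONLY | O_NONBLOCK);

        if (fd < 0)
            return 1;

        /* Map the meta page first; it describes the rest of the mapping. */
        meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (meta == MAP_FAILED)
            return 1;

        meta_len = meta->meta_page_size;
        data_len = meta->subbuf_size * meta->nr_subbufs;

        /* The sub-buffers start right after the meta page(s). */
        data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd, meta_len);
        if (data == MAP_FAILED)
            return 1;

        printf("%u sub-buffers of %u bytes\n", meta->nr_subbufs, meta->subbuf_size);

        munmap(data, data_len);
        munmap(meta, meta_len);
        close(fd);
        return 0;
    }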
|
/linux/net/psample/psample.c
    376  int meta_len;                                                           in psample_sample_packet()  [local]
    384  meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +             in psample_sample_packet()
    403  meta_len += psample_tunnel_meta_len(tun_info);                          in psample_sample_packet()
    407  if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)      in psample_sample_packet()
    408  data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN              in psample_sample_packet()
    411  nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);  in psample_sample_packet()
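
psample_sample_packet() budgets the netlink message as metadata attributes plus one packet-data attribute, and trims data_len when the sum would exceed PSAMPLE_MAX_PACKET_SIZE. A user-space sketch of the same arithmetic; nla_total_size() is kernel-internal, so it is re-derived here from NLA_HDRLEN/NLA_ALIGN, the attribute set is abridged, and the cap value is an assumed stand-in:

    #include <stdint.h>
    #include <stdio.h>
    #include <linux/netlink.h>

    /* Kernel's nla_total_size(): attribute header plus payload, aligned. */
    static int nla_total_size(int payload)
    {
        return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
        const int max_packet_size = 0x10000;   /* stand-in for PSAMPLE_MAX_PACKET_SIZE */
        int data_len = 70000;                  /* sampled frame length */
        int meta_len;

        /* Fixed-size metadata attributes: ifindexes, rate, group, sequence. */
        meta_len = nla_total_size(sizeof(uint16_t)) +   /* input ifindex */
                   nla_total_size(sizeof(uint16_t)) +   /* output ifindex */
                   nla_total_size(sizeof(uint32_t)) +   /* sample rate */
                   nla_total_size(sizeof(uint32_t)) +   /* original size */
                   nla_total_size(sizeof(uint32_t)) +   /* group */
                   nla_total_size(sizeof(uint32_t));    /* group sequence */

        /* Trim the packet payload so metadata + data fit the message cap. */
        if (meta_len + nla_total_size(data_len) > max_packet_size)
            data_len = max_packet_size - meta_len - NLA_HDRLEN - NLA_ALIGNTO;

        printf("meta_len=%d, trimmed data_len=%d, total=%d\n",
               meta_len, data_len, meta_len + nla_total_size(data_len));
        return 0;
    }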
|
/linux/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
    93   void *data, void *pkt, unsigned int pkt_len, int meta_len);
|
/linux/include/linux/skbuff.h
    595   __u8 meta_len;                                                          [member]
    4497  return skb_shinfo(skb)->meta_len;                                       in skb_metadata_len()
    4507  u8 meta_len)                                                            in __skb_metadata_differs()  [argument]
    4518  switch (meta_len) {                                                     in __skb_metadata_differs()
    4539  return memcmp(a - meta_len, b - meta_len, meta_len);                    in __skb_metadata_differs()
    4557  static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)   in skb_metadata_set()  [argument]
    4559  skb_shinfo(skb)->meta_len = meta_len;                                   in skb_metadata_set()
    4585  const u8 meta_len = skb_metadata_len(skb);                              in skb_data_move()  [local]
    4588  if (!len || (!n && !meta_len))                                          in skb_data_move()
    4591  if (!meta_len)                                                          in skb_data_move()
    [all …]
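
skb_shinfo(skb)->meta_len records how many bytes of XDP metadata sit just in front of the packet headers, and skb_metadata_len()/skb_metadata_set() are the accessors the stack uses for it. On the consumer side, a tc BPF program sees that region between data_meta and data; a minimal sketch follows, with a hypothetical one-word metadata layout assumed to have been written earlier by an XDP program:

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    /* Hypothetical layout of the metadata an XDP program prepended;
     * the kernel carries it into the skb and exposes it to tc programs
     * via __sk_buff::data_meta.
     */
    struct rx_meta {
        __u32 rx_mark;
    };

    SEC("tc")
    int read_meta(struct __sk_buff *skb)
    {
        void *data      = (void *)(long)skb->data;
        void *data_meta = (void *)(long)skb->data_meta;
        struct rx_meta *meta = data_meta;

        /* meta_len == data - data_meta; zero when no metadata was set. */
        if ((void *)(meta + 1) > data)
            return TC_ACT_OK;

        skb->mark = meta->rx_mark;   /* hand the XDP hint to the rest of the stack */
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";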
|
/linux/net/xdp/xsk.c
    238  u32 from_len, meta_len, rem, num_desc;                               in __xsk_rcv()  [local]
    244  meta_len = xdp->data - copy_from;                                    in __xsk_rcv()
    245  rem = len + meta_len;                                                in __xsk_rcv()
    255  memcpy(xsk_xdp->data - meta_len, copy_from, rem);                    in __xsk_rcv()
    285  u32 to_len = frame_size + meta_len;                                  in __xsk_rcv()
    289  copy_to = xsk_xdp->data - meta_len;                                  in __xsk_rcv()
    295  __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);  in __xsk_rcv()
    296  meta_len = 0;                                                        in __xsk_rcv()
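
In __xsk_rcv(), meta_len is the gap between xdp->data_meta and xdp->data, and the copy into the AF_XDP frame starts that many bytes before the data so the metadata travels with the payload. The producer of that gap is an XDP program calling bpf_xdp_adjust_meta(); a minimal sketch with the same hypothetical layout as the tc example above (the redirect into an XSKMAP is omitted):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Same hypothetical metadata layout as the tc example above. */
    struct rx_meta {
        __u32 rx_mark;
    };

    SEC("xdp")
    int xdp_tag(struct xdp_md *ctx)
    {
        struct rx_meta *meta;
        void *data;

        /* Grow the metadata area in front of the packet; on success,
         * ctx->data_meta..ctx->data is the meta_len that the kernel
         * (and __xsk_rcv()) copies along with the frame.
         */
        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
            return XDP_PASS;

        data = (void *)(long)ctx->data;
        meta = (void *)(long)ctx->data_meta;
        if ((void *)(meta + 1) > data)
            return XDP_PASS;

        meta->rx_mark = 42;
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";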
|
/linux/tools/testing/selftests/bpf/prog_tests/type_cast.c
    69   ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");   in test_tc()
|
/linux/drivers/net/ethernet/netronome/nfp/flower/offload.c
    98   u32 meta_len, key_len, mask_len, act_len, tot_len;                in nfp_flower_xmit_flow()  [local]
    102  meta_len = sizeof(struct nfp_fl_rule_metadata);                   in nfp_flower_xmit_flow()
    107  tot_len = meta_len + key_len + mask_len + act_len;                in nfp_flower_xmit_flow()
    121  memcpy(msg, &nfp_flow->meta, meta_len);                           in nfp_flower_xmit_flow()
    122  memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);         in nfp_flower_xmit_flow()
    123  memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);  in nfp_flower_xmit_flow()
    124  memcpy(&msg[meta_len + key_len + mask_len],                       in nfp_flower_xmit_flow()
|
/linux/net/bpf/test_run.c
    126  size_t frm_len, meta_len;                          in xdp_test_run_init_page()  [local]
    132  meta_len = orig_ctx->data - orig_ctx->data_meta;   in xdp_test_run_init_page()
    133  headroom -= meta_len;                              in xdp_test_run_init_page()
    142  new_ctx->data = new_ctx->data_meta + meta_len;     in xdp_test_run_init_page()
|
/linux/net/core/skbuff.c
    6241  int mac_len, meta_len;                       in skb_reorder_vlan_header()  [local]
    6255  meta_len = skb_metadata_len(skb);            in skb_reorder_vlan_header()
    6256  if (meta_len) {                              in skb_reorder_vlan_header()
    6257  meta = skb_metadata_end(skb) - meta_len;     in skb_reorder_vlan_header()
    6258  memmove(meta + VLAN_HLEN, meta, meta_len);   in skb_reorder_vlan_header()
|
/linux/net/core/filter.c
    3336  const u8 meta_len = skb_metadata_len(skb);     in bpf_skb_proto_4_to_6()  [local]
    3340  ret = skb_cow(skb, meta_len + len_diff);       in bpf_skb_proto_4_to_6()
    3500  const u8 meta_len = skb_metadata_len(skb);     in bpf_skb_net_grow()  [local]
    3511  ret = skb_cow_head(skb, meta_len + len_diff);  in bpf_skb_net_grow()
    3885  const u8 meta_len = skb_metadata_len(skb);     in __bpf_skb_change_head()  [local]
    3895  ret = skb_cow(skb, meta_len + head_room);      in __bpf_skb_change_head()
|
/linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
    1163  size_t meta_len, meta_max_len, rx_len;                                       in axienet_dma_rx_cb()  [local]
    1171  app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,   in axienet_dma_rx_cb()
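
axienet_dma_rx_cb() retrieves the per-descriptor metadata (the AXI DMA "app" words, which carry information such as the received length) through the DMA engine client-metadata API. A kernel-side sketch of that call pattern in a completion callback; struct my_rx and the callback wiring are hypothetical glue, not the driver's actual structures:

    #include <linux/dmaengine.h>
    #include <linux/err.h>

    /* Hypothetical per-transfer bookkeeping holding the submitted descriptor;
     * the callback would be wired up via desc->callback_result before
     * dmaengine_submit().
     */
    struct my_rx {
        struct dma_async_tx_descriptor *desc;
    };

    static void my_rx_cb(void *param, const struct dmaengine_result *result)
    {
        struct my_rx *rx = param;
        size_t meta_len, meta_max_len;
        void *app_metadata;

        /* DESC_METADATA_POINTER mode: the engine exposes meta_len bytes of
         * device-specific metadata attached to this descriptor.
         */
        app_metadata = dmaengine_desc_get_metadata_ptr(rx->desc, &meta_len,
                                                       &meta_max_len);
        if (IS_ERR_OR_NULL(app_metadata))
            return;

        /* Parse the meta_len bytes here; the format (e.g. received frame
         * length in an "app" word) is defined by the DMA hardware.
         */
    }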
|