xref: /linux/net/packet/internal.h (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
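
/*
 * Each packet_mclist entry mirrors one PACKET_ADD_MEMBERSHIP request made
 * from userspace (->count tracks repeated adds of the same membership).
 * Minimal userspace sketch, assuming 'fd' is an existing AF_PACKET socket
 * and 'ifindex' names the target device; struct packet_mreq comes from
 * <linux/if_packet.h>:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,			// -> mclist->ifindex
 *		.mr_type    = PACKET_MR_PROMISC,	// -> mclist->type
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */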

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	rwlock_t	blk_fill_in_prog_lock;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
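
/*
 * This block-descriptor state is set up when userspace requests a
 * TPACKET_V3 ring: tp_retire_blk_tov, tp_sizeof_priv, tp_block_size and
 * tp_block_nr from struct tpacket_req3 feed retire_blk_tov,
 * blk_sizeof_priv, kblk_size and knum_blocks respectively.  Minimal
 * userspace sketch, assuming 'fd' is an AF_PACKET socket:
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 22,
 *		.tp_block_nr       = 64,
 *		.tp_frame_size     = 1 << 11,
 *		.tp_frame_nr       = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,	// ms; 0 lets the kernel pick
 *		.tp_sizeof_priv    = 0,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */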

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	union {
		unsigned long			*rx_owner_map;
		struct tpacket_kbdq_core	prb_bdqc;
	};
};
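
/*
 * For TPACKET_V1/V2 the ring is indexed frame-by-frame; the address of
 * frame 'pos' follows from the fields above, roughly as
 * packet_lookup_frame() in af_packet.c computes it.  Illustrative helper
 * (not part of this header):
 *
 *	static inline void *ring_frame(const struct packet_ring_buffer *rb,
 *				       unsigned int pos)
 *	{
 *		unsigned int blk = pos / rb->frames_per_block;
 *		unsigned int off = pos % rb->frames_per_block;
 *
 *		return rb->pg_vec[blk].buffer + off * rb->frame_size;
 *	}
 */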

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	(1 << 16)

struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u32			max_num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	struct sock	__rcu	*arr[];
};
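
/*
 * A fanout group is created or joined from userspace with the
 * PACKET_FANOUT socket option; the 32-bit argument carries the group id
 * in the low 16 bits and the mode plus flags in the high 16 bits.  Each
 * joining socket is placed in ->arr[] and shares the group's prot_hook.
 * Minimal sketch, assuming 'fd' is a bound AF_PACKET socket and
 * 'group_id' is a 16-bit id chosen by the application:
 *
 *	unsigned int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */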

struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;
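
/*
 * num/num_huge/num_failed are reported to userspace as
 * tp_all/tp_huge/tp_failed in struct tpacket_rollover_stats.  Minimal
 * sketch of reading them, assuming 'fd' is the AF_PACKET socket:
 *
 *	struct tpacket_rollover_stats rstats;
 *	socklen_t len = sizeof(rstats);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS, &rstats, &len);
 */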

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union  tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	atomic_t		tp_drops ____cacheline_aligned_in_smp;
};

static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
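
/*
 * The cast in pkt_sk() is only valid because 'sk' is the first member of
 * struct packet_sock (see the comment in the struct); an equivalent,
 * more explicit spelling would be:
 *
 *	return container_of(sk, struct packet_sock, sk);
 */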

#endif