/* xref: /linux/net/packet/internal.h (revision 905e46acd3272d04566fec49afbd7ad9e2ed9ae3) */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
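
/*
 * A packet_mclist entry is created for each PACKET_ADD_MEMBERSHIP request on
 * the socket; ifindex/type/alen/addr mirror the struct packet_mreq passed in.
 * A minimal userspace sketch of such a request (illustrative only, not part
 * of this header; error handling trimmed; join_multicast() is a made-up
 * helper name):
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int join_multicast(int fd, int ifindex, const unsigned char mac[ETH_ALEN])
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;		/* -> packet_mclist::ifindex */
	mreq.mr_type	= PACKET_MR_MULTICAST;	/* -> packet_mclist::type */
	mreq.mr_alen	= ETH_ALEN;		/* -> packet_mclist::alen */
	memcpy(mreq.mr_address, mac, ETH_ALEN);	/* -> packet_mclist::addr */

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}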

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * used to check whether user space has caught up, so that the retire
	 * timer is not refreshed for every single packet that arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default block retire timeout, in milliseconds */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
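
/*
 * The block-descriptor queue above backs a TPACKET_V3 ring. A hedged
 * userspace sketch of the setup that feeds it: tp_retire_blk_tov maps to
 * retire_blk_tov (0 lets the kernel pick a timeout, with
 * DEFAULT_PRB_RETIRE_TOV as the fallback), tp_sizeof_priv to blk_sizeof_priv,
 * and tp_feature_req_word to feature_req_word. The ring geometry and the
 * setup_v3_rx_ring() name are arbitrary examples, not kernel API:
 */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_v3_rx_ring(int fd)
{
	int version = TPACKET_V3;
	struct tpacket_req3 req = {
		.tp_block_size	     = 1 << 22,		/* 4 MiB per block */
		.tp_block_nr	     = 64,
		.tp_frame_size	     = 1 << 11,		/* a hint for V3 */
		.tp_frame_nr	     = ((1 << 22) / (1 << 11)) * 64,
		.tp_retire_blk_tov   = 60,	/* ms; 0 = kernel default */
		.tp_sizeof_priv	     = 0,
		.tp_feature_req_word = 0,
	};

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
		       &version, sizeof(version)) < 0)
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}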

/* One entry of the ring's page vector: the start of one contiguous block. */
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;
};
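
/*
 * pg_vec is what a process sees as one flat mapping after mmap()ing the
 * socket: block n of the mapping corresponds to pg_vec[n].buffer. A hedged
 * sketch, reusing the tpacket_req3 from the setup sketch above (map_ring()
 * and block_desc() are made-up helper names):
 */
#include <sys/mman.h>
#include <linux/if_packet.h>

static void *map_ring(int fd, const struct tpacket_req3 *req)
{
	size_t len = (size_t)req->tp_block_size * req->tp_block_nr;
	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	return ring == MAP_FAILED ? NULL : ring;
}

/* Start of block n, i.e. the userspace view of pg_vec[n].buffer. */
static struct tpacket_block_desc *block_desc(void *ring,
					     const struct tpacket_req3 *req,
					     unsigned int n)
{
	return (struct tpacket_block_desc *)
		((char *)ring + (size_t)n * req->tp_block_size);
}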

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256

struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
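
/*
 * A fanout group is created or joined with the PACKET_FANOUT socket option;
 * the id, type and flags fields above come straight from its argument, and
 * up to PACKET_FANOUT_MAX member sockets are stored in arr[]. A hedged
 * sketch (the hash mode and rollover flag are just one possible choice;
 * join_fanout_group() is a made-up helper name):
 */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_fanout_group(int fd, unsigned int group_id)
{
	/* Low 16 bits: group id; high 16 bits: fanout mode plus flags. */
	unsigned int arg = (group_id & 0xffff) |
			   ((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER) << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
}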

struct packet_rollover {
	int			sock;
	struct rcu_head		rcu;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;
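
/*
 * The num/num_huge/num_failed counters above are what the
 * PACKET_ROLLOVER_STATS getsockopt reports back to userspace as
 * struct tpacket_rollover_stats. A hedged sketch (print_rollover_stats()
 * is a made-up helper name):
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void print_rollover_stats(int fd)
{
	struct tpacket_rollover_stats rstats;
	socklen_t len = sizeof(rstats);

	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
		       &rstats, &len) == 0)
		printf("rollovers=%llu huge=%llu failed=%llu\n",
		       (unsigned long long)rstats.tp_all,
		       (unsigned long long)rstats.tp_huge,
		       (unsigned long long)rstats.tp_failed);
}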

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union  tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			pressure;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tx_has_off:1;
	unsigned int		tp_tstamp;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
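
/*
 * Most of the bitfields and tp_* members above are toggled directly by
 * SOL_PACKET socket options: auxdata by PACKET_AUXDATA, origdev by
 * PACKET_ORIGDEV, has_vnet_hdr by PACKET_VNET_HDR, tp_tstamp by
 * PACKET_TIMESTAMP, tp_version by PACKET_VERSION, and so on. A hedged
 * sketch of a few of them (enable_metadata() is a made-up helper name):
 */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/net_tstamp.h>

static int enable_metadata(int fd)
{
	int one = 1;
	int ts = SOF_TIMESTAMPING_RAW_HARDWARE;

	if (setsockopt(fd, SOL_PACKET, PACKET_AUXDATA,
		       &one, sizeof(one)) < 0)		/* -> po->auxdata */
		return -1;
	if (setsockopt(fd, SOL_PACKET, PACKET_ORIGDEV,
		       &one, sizeof(one)) < 0)		/* -> po->origdev */
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
			  &ts, sizeof(ts));		/* -> po->tp_tstamp */
}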

/* Valid because sk is the first member of packet_sock (see above). */
static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif