xref: /linux/drivers/net/ethernet/google/gve/gve_desc.h (revision ec8a42e7343234802b9054874fe01810880289ce)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2019 Google, Inc.
5  */
6 
7 /* GVE Transmit Descriptor formats */
8 
9 #ifndef _GVE_DESC_H_
10 #define _GVE_DESC_H_
11 
12 #include <linux/build_bug.h>
13 
14 /* A note on seg_addrs
15  *
16  * Base addresses encoded in seg_addr are not assumed to be physical
17  * addresses. The ring format assumes these come from some linear address
18  * space. This could be physical memory, kernel virtual memory, user virtual
19  * memory.
20  * If raw dma addressing is not supported then gVNIC uses lists of registered
21  * pages. Each queue is assumed to be associated with a single such linear
22  * address space to ensure a consistent meaning for seg_addrs posted to its
23  * rings.
24  */
25 
/* Transmit packet descriptor: carries packet-wide metadata (total length,
 * descriptor count, checksum offload offsets) along with the first buffer
 * segment of the packet.
 */
struct gve_tx_pkt_desc {
	u8	type_flags;  /* desc type is lower 4 bits, flags upper */
	u8	l4_csum_offset;  /* relative offset of L4 csum word */
	u8	l4_hdr_offset;  /* Offset of start of L4 headers in packet */
	u8	desc_cnt;  /* Total descriptors for this packet */
	__be16	len;  /* Total length of this packet (in bytes) */
	__be16	seg_len;  /* Length of this descriptor's segment */
	__be64	seg_addr;  /* Base address (see note) of this segment */
} __packed;
35 
/* Transmit segment descriptor: an additional buffer segment of a packet,
 * carrying the TSO parameters (L3 offset, MSS) when used for TSO.
 */
struct gve_tx_seg_desc {
	u8	type_flags;	/* type is lower 4 bits, flags upper	*/
	u8	l3_offset;	/* TSO: 2 byte units to start of IPH	*/
	__be16	reserved;
	__be16	mss;		/* TSO MSS				*/
	__be16	seg_len;	/* Length of this segment		*/
	__be64	seg_addr;	/* Base address (see note) of segment	*/
} __packed;
44 
45 /* GVE Transmit Descriptor Types */
46 #define	GVE_TXD_STD		(0x0 << 4) /* Std with Host Address	*/
47 #define	GVE_TXD_TSO		(0x1 << 4) /* TSO with Host Address	*/
48 #define	GVE_TXD_SEG		(0x2 << 4) /* Seg with Host Address	*/
49 
50 /* GVE Transmit Descriptor Flags for Std Pkts */
51 #define	GVE_TXF_L4CSUM	BIT(0)	/* Need csum offload */
52 #define	GVE_TXF_TSTAMP	BIT(2)	/* Timestamp required */
53 
54 /* GVE Transmit Descriptor Flags for TSO Segs */
55 #define	GVE_TXSF_IPV6	BIT(1)	/* IPv6 TSO */
56 
57 /* GVE Receive Packet Descriptor */
58 /* The start of an ethernet packet comes 2 bytes into the rx buffer.
59  * gVNIC adds this padding so that both the DMA and the L3/4 protocol header
60  * access is aligned.
61  */
62 #define GVE_RX_PAD 2
63 
/* Receive descriptor layout; fixed 64-byte size, verified by the
 * static_assert below.
 */
struct gve_rx_desc {
	u8	padding[48];
	__be32	rss_hash;  /* Receive-side scaling hash (Toeplitz for gVNIC) */
	__be16	mss;
	__be16	reserved;  /* Reserved to zero */
	u8	hdr_len;  /* Header length (L2-L4) including padding */
	u8	hdr_off;  /* 64-byte-scaled offset into RX_DATA entry */
	__sum16	csum;  /* 1's-complement partial checksum of L3+ bytes */
	__be16	len;  /* Length of the received packet */
	__be16	flags_seq;  /* Flags [15:3] and sequence number [2:0] (1-7) */
} __packed;
static_assert(sizeof(struct gve_rx_desc) == 64);
76 
77 /* If the device supports raw dma addressing then the addr in data slot is
78  * the dma address of the buffer.
79  * If the device only supports registered segments then the addr is a byte
80  * offset into the registered segment (an ordered list of pages) where the
81  * buffer is.
82  */
union gve_rx_data_slot {
	__be64 qpl_offset;	/* byte offset into the registered segment
				 * (list of pages) when raw DMA addressing is
				 * not supported
				 */
	__be64 addr;		/* DMA address of the buffer when the device
				 * supports raw DMA addressing
				 */
};
87 
/* GVE Receive Packet Descriptor Seq No */
89 #define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)
90 
/* GVE Receive Packet Descriptor Flags */
92 #define GVE_RXFLG(x)	cpu_to_be16(1 << (3 + (x)))
93 #define	GVE_RXF_FRAG	GVE_RXFLG(3)	/* IP Fragment			*/
94 #define	GVE_RXF_IPV4	GVE_RXFLG(4)	/* IPv4				*/
95 #define	GVE_RXF_IPV6	GVE_RXFLG(5)	/* IPv6				*/
96 #define	GVE_RXF_TCP	GVE_RXFLG(6)	/* TCP Packet			*/
97 #define	GVE_RXF_UDP	GVE_RXFLG(7)	/* UDP Packet			*/
98 #define	GVE_RXF_ERR	GVE_RXFLG(8)	/* Packet Error Detected	*/
99 
100 /* GVE IRQ */
101 #define GVE_IRQ_ACK	BIT(31)
102 #define GVE_IRQ_MASK	BIT(30)
103 #define GVE_IRQ_EVENT	BIT(29)
104 
105 static inline bool gve_needs_rss(__be16 flag)
106 {
107 	if (flag & GVE_RXF_FRAG)
108 		return false;
109 	if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
110 		return true;
111 	return false;
112 }
113 
114 static inline u8 gve_next_seqno(u8 seq)
115 {
116 	return (seq + 1) == 8 ? 1 : seq + 1;
117 }
118 #endif /* _GVE_DESC_H_ */
119