xref: /linux/drivers/net/ethernet/ibm/ibmveth.h (revision cc427d24ac6442ffdeafd157a63c7c5b73ed4de4)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * IBM Power Virtual Ethernet Device Driver
4  *
5  * Copyright (C) IBM Corporation, 2003, 2010
6  *
7  * Authors: Dave Larson <larson1@us.ibm.com>
8  *	    Santiago Leon <santil@linux.vnet.ibm.com>
9  *	    Brian King <brking@linux.vnet.ibm.com>
10  *	    Robert Jennings <rcj@linux.vnet.ibm.com>
11  *	    Anton Blanchard <anton@au.ibm.com>
12  */
13 
14 #ifndef _IBMVETH_H
15 #define _IBMVETH_H
16 
/* constants for H_MULTICAST_CTRL */
/*
 * The command word carries modify/enable bit pairs: a "modify" bit tells
 * the hypervisor that the paired "enable" bit should be acted on, so
 * modify-without-enable disables the feature (see the Disable macros).
 */
#define IbmVethMcastReceptionModifyBit     0x80000UL
#define IbmVethMcastReceptionEnableBit     0x20000UL
#define IbmVethMcastFilterModifyBit        0x40000UL
#define IbmVethMcastFilterEnableBit        0x10000UL

#define IbmVethMcastEnableRecv       (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv      (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering  (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
/* filter-table manipulation commands for the same hcall */
#define IbmVethMcastAddFilter        0x1UL
#define IbmVethMcastRemoveFilter     0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
30 
/*
 * Attribute bits exchanged with the hypervisor through the
 * H_ILLAN_ATTRIBUTES hcall (see h_illan_attributes()).
 */
#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT	0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED	0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT	0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM		0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK		0x0000000000000001UL

#define IBMVETH_MIN_LSO_MSS		224	/* Minimum MSS for LSO */
/* hcall macros */
/*
 * Thin wrappers around the logical-LAN hcalls that return only a status
 * (no output registers), hence plpar_hcall_norets().
 */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
  plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

#define h_free_logical_lan(ua) \
  plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

#define h_add_logical_lan_buffer(ua, buf) \
  plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
50 
51 static inline long h_add_logical_lan_buffers(unsigned long unit_address,
52 					     unsigned long desc1,
53 					     unsigned long desc2,
54 					     unsigned long desc3,
55 					     unsigned long desc4,
56 					     unsigned long desc5,
57 					     unsigned long desc6,
58 					     unsigned long desc7,
59 					     unsigned long desc8)
60 {
61 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
62 
63 	return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
64 			    retbuf, unit_address,
65 			    desc1, desc2, desc3, desc4,
66 			    desc5, desc6, desc7, desc8);
67 }
68 
69 /* FW allows us to send 6 descriptors but we only use one so mark
70  * the other 5 as unused (0)
71  */
72 static inline long h_send_logical_lan(unsigned long unit_address,
73 		unsigned long desc, unsigned long corellator_in,
74 		unsigned long *corellator_out, unsigned long mss,
75 		unsigned long large_send_support)
76 {
77 	long rc;
78 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
79 
80 	if (large_send_support)
81 		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
82 				  desc, 0, 0, 0, 0, 0, corellator_in, mss);
83 	else
84 		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
85 				  desc, 0, 0, 0, 0, 0, corellator_in);
86 
87 	*corellator_out = retbuf[0];
88 
89 	return rc;
90 }
91 
92 static inline long h_illan_attributes(unsigned long unit_address,
93 				      unsigned long reset_mask, unsigned long set_mask,
94 				      unsigned long *ret_attributes)
95 {
96 	long rc;
97 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
98 
99 	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
100 			 reset_mask, set_mask);
101 
102 	*ret_attributes = retbuf[0];
103 
104 	return rc;
105 }
106 
/* status-only hcall wrappers, as above */
#define h_multicast_ctrl(ua, cmd, mac) \
  plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

#define h_change_logical_lan_mac(ua, mac) \
  plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
112 
#define IBMVETH_NUM_BUFF_POOLS 5	/* matches the pool_* arrays below */
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096	/* upper bound on buffers per rx pool */
/* sizes of the buffer/filter list pages given to h_register_logical_lan */
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
/* tx queue limits; tx_ltb_* arrays in ibmveth_adapter are sized by MAX */
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
/* max rx buffers per replenish hcall (h_add_logical_lan_buffers takes 8) */
#define IBMVETH_MAX_RX_PER_HCALL 8U
125 
/*
 * Default geometry of the IBMVETH_NUM_BUFF_POOLS rx buffer pools:
 * per-pool buffer size, default buffer count, buffer count under CMO
 * (presumably Cooperative Memory Overcommit -- confirm in ibmveth.c),
 * and whether each pool starts active.
 *
 * NOTE(review): static data defined in a header gets a private copy in
 * every translation unit that includes it; harmless only if ibmveth.c
 * is the sole includer -- confirm.
 */
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
static int pool_active[] = { 1, 1, 0, 0, 1};

/* sentinel for u16 map entries (see ibmveth_buff_pool::free_map) */
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
132 
/* A pool of same-sized, DMA-mapped receive buffers */
struct ibmveth_buff_pool {
    u32 size;		/* number of buffers in the pool */
    u32 index;		/* this pool's index within rx_buff_pool[] */
    u32 buff_size;	/* size in bytes of each buffer */
    u32 threshold;	/* replenish threshold -- TODO confirm semantics in ibmveth.c */
    atomic_t available;	/* presumably buffers currently posted -- confirm */
    u32 consumer_index;
    u32 producer_index;
    u16 *free_map;	/* per-slot map; IBM_VETH_INVALID_MAP marks unusable entries */
    dma_addr_t *dma_addr;	/* per-buffer DMA addresses */
    struct sk_buff **skbuff;	/* per-buffer skbs */
    int active;		/* nonzero when the pool is in use */
    struct kobject kobj;	/* sysfs object representing this pool */
};
147 
/* Receive completion queue shared with the hypervisor */
struct ibmveth_rx_q {
    u64        index;	/* next entry to consume -- confirm against ibmveth.c */
    u64        num_slots;	/* number of entries in queue_addr[] */
    u64        toggle;	/* expected toggle value (see IBMVETH_RXQ_TOGGLE) -- confirm */
    dma_addr_t queue_dma;	/* DMA address of the queue */
    u32        queue_len;	/* queue length in bytes */
    struct ibmveth_rx_q_entry *queue_addr;	/* virtual address of the entries */
};
156 
/* Per-device driver state */
struct ibmveth_adapter {
	struct vio_dev *vdev;		/* underlying VIO bus device */
	struct net_device *netdev;
	struct napi_struct napi;
	struct work_struct work;
	unsigned int mcastFilterSize;	/* multicast filter capacity -- confirm source */
	void *buffer_list_addr;		/* presumably the buflst page for h_register_logical_lan -- confirm */
	void *filter_list_addr;		/* presumably the fltlst page for h_register_logical_lan -- confirm */
	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];	/* per-tx-queue buffers */
	unsigned int tx_ltb_size;	/* size of each tx buffer above */
	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];	/* their DMA addresses */
	dma_addr_t buffer_list_dma;
	dma_addr_t filter_list_dma;
	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
	struct ibmveth_rx_q rx_queue;
	int rx_csum;			/* rx checksum offload enabled */
	int large_send;			/* large send enabled */
	bool is_active_trunk;		/* see IBMVETH_ILLAN_ACTIVE_TRUNK */
	unsigned int rx_buffers_per_hcall;	/* presumably 1 or IBMVETH_MAX_RX_PER_HCALL -- confirm */

	/* firmware capability words -- presumably from H_ILLAN_ATTRIBUTES, confirm */
	u64 fw_ipv6_csum_support;
	u64 fw_ipv4_csum_support;
	u64 fw_large_send_support;
	/* adapter specific stats */
	u64 replenish_task_cycles;
	u64 replenish_no_mem;
	u64 replenish_add_buff_failure;
	u64 replenish_add_buff_success;
	u64 rx_invalid_buffer;
	u64 rx_no_buffer;
	u64 tx_map_failed;
	u64 tx_send_failed;
	u64 tx_large_packets;
	u64 rx_large_packets;
	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};
195 
/*
 * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
 * so we don't need to byteswap the two elements. However since we use
 * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
 * do end up with endian specific ordering of the elements and that
 * needs correcting.
 */
struct ibmveth_buf_desc_fields {
#ifdef __BIG_ENDIAN
	u32 flags_len;	/* flag bits in the top byte, length in the low 24 bits */
	u32 address;
#else
	u32 address;
	u32 flags_len;
#endif
/* flags_len bit definitions */
#define IBMVETH_BUF_VALID	0x80000000
#define IBMVETH_BUF_TOGGLE	0x40000000
#define IBMVETH_BUF_LRG_SND     0x04000000
#define IBMVETH_BUF_NO_CSUM	0x02000000
#define IBMVETH_BUF_CSUM_GOOD	0x01000000
#define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
};
218 
/* Lets a descriptor be built field-wise yet handed to firmware as one u64 */
union ibmveth_buf_desc {
    u64 desc;
    struct ibmveth_buf_desc_fields fields;
};
223 
/* One receive-queue entry, written by the hypervisor (hence __be32 fields) */
struct ibmveth_rx_q_entry {
	__be32 flags_off;	/* flag bits plus a 16-bit offset (IBMVETH_RXQ_OFF_MASK) */
#define IBMVETH_RXQ_TOGGLE		0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT	31
#define IBMVETH_RXQ_VALID		0x40000000
#define IBMVETH_RXQ_LRG_PKT		0x04000000
#define IBMVETH_RXQ_NO_CSUM		0x02000000
#define IBMVETH_RXQ_CSUM_GOOD		0x01000000
#define IBMVETH_RXQ_OFF_MASK		0x0000FFFF

	__be32 length;	/* length field, big endian */
	/* correlator is only used by the OS, no need to byte swap */
	u64 correlator;
};
238 
239 #endif /* _IBMVETH_H */
240