/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#ifndef _IBMVETH_H
#define _IBMVETH_H

/* constants for H_MULTICAST_CTRL */
#define IbmVethMcastReceptionModifyBit	0x80000UL
#define IbmVethMcastReceptionEnableBit	0x20000UL
#define IbmVethMcastFilterModifyBit	0x40000UL
#define IbmVethMcastFilterEnableBit	0x10000UL

#define IbmVethMcastEnableRecv		(IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv		(IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering	(IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering	(IbmVethMcastFilterModifyBit)
#define IbmVethMcastAddFilter		0x1UL
#define IbmVethMcastRemoveFilter	0x2UL
#define IbmVethMcastClearFilterTable	0x3UL

#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT	0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED		0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT		0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM		0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK		0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM		0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK		0x0000000000000001UL

/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
	plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

#define h_free_logical_lan(ua) \
	plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

#define h_add_logical_lan_buffer(ua, buf) \
	plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
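/*
 * Illustrative sketch (not part of this header): a caller such as the
 * companion ibmveth.c driver would use the wrappers above roughly like
 * this, where "adapter" is a struct ibmveth_adapter * (defined later in
 * this file) and "mac" is the MAC address packed into a u64:
 *
 *	union ibmveth_buf_desc rxq_desc;
 *	long rc;
 *
 *	rc = h_register_logical_lan(adapter->vdev->unit_address,
 *				    adapter->buffer_list_dma, rxq_desc.desc,
 *				    adapter->filter_list_dma, mac);
 *	if (rc != H_SUCCESS)
 *		... retry or fail ...
 */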

static inline long h_add_logical_lan_buffers(unsigned long unit_address,
					     unsigned long desc1,
					     unsigned long desc2,
					     unsigned long desc3,
					     unsigned long desc4,
					     unsigned long desc5,
					     unsigned long desc6,
					     unsigned long desc7,
					     unsigned long desc8)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
			    retbuf, unit_address,
			    desc1, desc2, desc3, desc4,
			    desc5, desc6, desc7, desc8);
}

/* FW allows us to send 6 descriptors but we only use one so mark
 * the other 5 as unused (0)
 */
static inline long h_send_logical_lan(unsigned long unit_address,
		unsigned long desc, unsigned long correlator_in,
		unsigned long *correlator_out, unsigned long mss,
		unsigned long large_send_support)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (large_send_support)
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc, 0, 0, 0, 0, 0, correlator_in, mss);
	else
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc, 0, 0, 0, 0, 0, correlator_in);

	*correlator_out = retbuf[0];

	return rc;
}
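
/*
 * Illustrative sketch (not part of this header; "adapter", "dma_addr",
 * "skb" and "mss" are assumed locals of the caller): a transmit path
 * would typically build a single descriptor covering the frame and pass
 * it along with an opaque correlator, e.g.:
 *
 *	union ibmveth_buf_desc desc;
 *	unsigned long correlator = 0;
 *	long rc;
 *
 *	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
 *	desc.fields.address = dma_addr;
 *	rc = h_send_logical_lan(adapter->vdev->unit_address, desc.desc,
 *				correlator, &correlator, mss,
 *				adapter->fw_large_send_support);
 */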

static inline long h_illan_attributes(unsigned long unit_address,
				      unsigned long reset_mask, unsigned long set_mask,
				      unsigned long *ret_attributes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
			 reset_mask, set_mask);

	*ret_attributes = retbuf[0];

	return rc;
}
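
/*
 * Illustrative sketch (not part of this header): the hypervisor clears the
 * attribute bits in reset_mask, sets the bits in set_mask, and returns the
 * resulting attribute word; passing 0 for both masks simply queries the
 * current attributes. Enabling IPv4 TCP checksum offload might look
 * roughly like this, where "adapter" is an assumed caller variable:
 *
 *	unsigned long ret_attr;
 *	long rc;
 *
 *	rc = h_illan_attributes(adapter->vdev->unit_address, 0,
 *				IBMVETH_ILLAN_IPV4_TCP_CSUM, &ret_attr);
 *	if (rc == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_IPV4_TCP_CSUM))
 *		... offload is now active ...
 */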

#define h_multicast_ctrl(ua, cmd, mac) \
	plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

#define h_change_logical_lan_mac(ua, mac) \
	plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)

#define IBMVETH_NUM_BUFF_POOLS		5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT	4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH			22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU			68
#define IBMVETH_MAX_POOL_COUNT		4096
#define IBMVETH_BUFF_LIST_SIZE		4096
#define IBMVETH_FILT_LIST_SIZE		4096
#define IBMVETH_MAX_BUF_SIZE		(1024 * 128)
#define IBMVETH_MAX_TX_BUF_SIZE		(1024 * 64)
#define IBMVETH_MAX_QUEUES		16U
#define IBMVETH_DEFAULT_QUEUES		8U
#define IBMVETH_MAX_RX_PER_HCALL	8U

static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
static int pool_active[] = { 1, 1, 0, 0, 1 };
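/*
 * Element i of pool_size[], pool_count[] and pool_active[] gives the
 * defaults for rx_buff_pool[i] in struct ibmveth_adapter below: the buffer
 * size, the number of buffers, and whether the pool starts out enabled.
 * pool_count_cmo[] provides smaller counts, presumably for use when
 * Cooperative Memory Overcommit (CMO) limits the adapter's memory
 * entitlement.
 */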

#define IBM_VETH_INVALID_MAP ((u16)0xffff)

struct ibmveth_buff_pool {
	u32 size;
	u32 index;
	u32 buff_size;
	u32 threshold;
	atomic_t available;
	u32 consumer_index;
	u32 producer_index;
	u16 *free_map;
	dma_addr_t *dma_addr;
	struct sk_buff **skbuff;
	int active;
	struct kobject kobj;
};
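
/*
 * Illustrative sketch of how a pool slot is consumed when replenishing
 * receive buffers (the real logic lives in ibmveth.c; the variable names
 * here are assumptions):
 *
 *	u32 index = pool->free_map[free_index];	(slot to fill)
 *	u64 correlator = ((u64)pool->index << 32) | index;
 *
 *	pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 *	pool->skbuff[index] = skb;
 *	pool->dma_addr[index] = dma_addr;
 *	... hand the buffer to firmware via h_add_logical_lan_buffer() ...
 *
 * The correlator travels with the buffer and reappears in the receive
 * queue entry, letting the driver locate the pool and slot for a
 * completed buffer.
 */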

struct ibmveth_rx_q {
	u64 index;
	u64 num_slots;
	u64 toggle;
	dma_addr_t queue_dma;
	u32 queue_len;
	struct ibmveth_rx_q_entry *queue_addr;
};

struct ibmveth_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct napi_struct napi;
	struct work_struct work;
	unsigned int mcastFilterSize;
	void *buffer_list_addr;
	void *filter_list_addr;
	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
	unsigned int tx_ltb_size;
	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
	dma_addr_t buffer_list_dma;
	dma_addr_t filter_list_dma;
	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
	struct ibmveth_rx_q rx_queue;
	int rx_csum;
	int large_send;
	bool is_active_trunk;
	unsigned int rx_buffers_per_hcall;

	u64 fw_ipv6_csum_support;
	u64 fw_ipv4_csum_support;
	u64 fw_large_send_support;
	/* adapter specific stats */
	u64 replenish_task_cycles;
	u64 replenish_no_mem;
	u64 replenish_add_buff_failure;
	u64 replenish_add_buff_success;
	u64 rx_invalid_buffer;
	u64 rx_no_buffer;
	u64 tx_map_failed;
	u64 tx_send_failed;
	u64 tx_large_packets;
	u64 rx_large_packets;
	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};

/*
 * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
 * so we don't need to byteswap the two elements. However, since we use
 * a union (ibmveth_buf_desc) to convert from the struct to a u64, we
 * do end up with endian-specific ordering of the elements and that
 * needs correcting.
 */
struct ibmveth_buf_desc_fields {
#ifdef __BIG_ENDIAN
	u32 flags_len;
	u32 address;
#else
	u32 address;
	u32 flags_len;
#endif
#define IBMVETH_BUF_VALID	0x80000000
#define IBMVETH_BUF_TOGGLE	0x40000000
#define IBMVETH_BUF_LRG_SND	0x04000000
#define IBMVETH_BUF_NO_CSUM	0x02000000
#define IBMVETH_BUF_CSUM_GOOD	0x01000000
#define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
};

union ibmveth_buf_desc {
	u64 desc;
	struct ibmveth_buf_desc_fields fields;
};
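
/*
 * Illustrative sketch (not part of this header): the union lets a caller
 * fill in the 32-bit fields and hand the hypervisor the combined 64-bit
 * descriptor, e.g. when posting a receive buffer; "buf_len", "dma_addr"
 * and "adapter" are assumed locals of the caller:
 *
 *	union ibmveth_buf_desc desc;
 *	long rc;
 *
 *	desc.fields.flags_len = IBMVETH_BUF_VALID |
 *				(buf_len & IBMVETH_BUF_LEN_MASK);
 *	desc.fields.address = dma_addr;
 *	rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 */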

struct ibmveth_rx_q_entry {
	__be32 flags_off;
#define IBMVETH_RXQ_TOGGLE		0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT	31
#define IBMVETH_RXQ_VALID		0x40000000
#define IBMVETH_RXQ_LRG_PKT		0x04000000
#define IBMVETH_RXQ_NO_CSUM		0x02000000
#define IBMVETH_RXQ_CSUM_GOOD		0x01000000
#define IBMVETH_RXQ_OFF_MASK		0x0000FFFF

	__be32 length;
	/* correlator is only used by the OS, no need to byte swap */
	u64 correlator;
};
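
/*
 * Illustrative sketch (not part of this header): flags_off is big-endian
 * on the wire, so a consumer decodes it roughly like this, where "entry"
 * is a hypothetical struct ibmveth_rx_q_entry *:
 *
 *	u32 flags = be32_to_cpu(entry->flags_off);
 *	int toggle = (flags & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
 *	bool valid = flags & IBMVETH_RXQ_VALID;
 *	bool csum_good = flags & IBMVETH_RXQ_CSUM_GOOD;
 *	u16 offset = flags & IBMVETH_RXQ_OFF_MASK;	(data offset in buffer)
 *	u32 length = be32_to_cpu(entry->length);
 *
 * A consumer typically treats an entry as pending when its toggle bit
 * matches rx_queue.toggle and the valid bit is set.
 */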

#endif /* _IBMVETH_H */