/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#ifndef _IBMVETH_H
#define _IBMVETH_H

/* constants for H_MULTICAST_CTRL */
#define IbmVethMcastReceptionModifyBit     0x80000UL
#define IbmVethMcastReceptionEnableBit     0x20000UL
#define IbmVethMcastFilterModifyBit        0x40000UL
#define IbmVethMcastFilterEnableBit        0x10000UL

#define IbmVethMcastEnableRecv       (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv      (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering  (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
#define IbmVethMcastAddFilter        0x1UL
#define IbmVethMcastRemoveFilter     0x2UL
#define IbmVethMcastClearFilterTable 0x3UL

#define IBMVETH_ILLAN_LRG_SR_ENABLED	0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT	0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM	0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM	0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK	0x0000000000000001UL

/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
  plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

#define h_free_logical_lan(ua) \
  plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

#define h_add_logical_lan_buffer(ua, buf) \
  plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)

/* FW allows us to send 6 descriptors, but we only use one, so mark
 * the other 5 as unused (0).
 */
static inline long h_send_logical_lan(unsigned long unit_address,
		unsigned long desc, unsigned long correlator_in,
		unsigned long *correlator_out, unsigned long mss,
		unsigned long large_send_support)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (large_send_support)
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc, 0, 0, 0, 0, 0, correlator_in, mss);
	else
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc, 0, 0, 0, 0, 0, correlator_in);

	*correlator_out = retbuf[0];

	return rc;
}

static inline long h_illan_attributes(unsigned long unit_address,
				      unsigned long reset_mask, unsigned long set_mask,
				      unsigned long *ret_attributes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
			 reset_mask, set_mask);

	*ret_attributes = retbuf[0];

	return rc;
}
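
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * query the current ILLAN attributes and then request IPv4 TCP checksum
 * offload. The helper name and flow are hypothetical; the real driver
 * performs this negotiation in ibmveth.c with additional error handling.
 */
static inline long ibmveth_example_enable_ipv4_csum(unsigned long unit_address)
{
	unsigned long attrs;
	long rc;

	/* reset_mask and set_mask of 0 simply read back the attributes */
	rc = h_illan_attributes(unit_address, 0, 0, &attrs);
	if (rc != H_SUCCESS)
		return rc;

	/* set the IPv4 TCP checksum offload bit, clearing nothing */
	return h_illan_attributes(unit_address, 0,
				  IBMVETH_ILLAN_IPV4_TCP_CSUM, &attrs);
}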

#define h_multicast_ctrl(ua, cmd, mac) \
  plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

#define h_change_logical_lan_mac(ua, mac) \
  plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
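
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * switch the adapter to all-multicast reception via H_MULTICAST_CTRL,
 * combining the reception, filtering and clear-table commands in one call.
 * The helper name is hypothetical; table-wide commands need no MAC address,
 * so 0 is passed, while per-address commands (IbmVethMcastAddFilter /
 * IbmVethMcastRemoveFilter) would pass the address encoded as a u64.
 */
static inline long ibmveth_example_allmulti(unsigned long unit_address)
{
	return h_multicast_ctrl(unit_address,
				IbmVethMcastEnableRecv |
				IbmVethMcastDisableFiltering |
				IbmVethMcastClearFilterTable, 0);
}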

#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U

static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
static int pool_active[] = { 1, 1, 0, 0, 1 };
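
/*
 * Illustrative addition (not part of the original header): the parallel
 * arrays above describe the IBMVETH_NUM_BUFF_POOLS receive buffer pools,
 * indexed per pool: buffer size, default buffer count, the reduced count
 * used under CMO (cooperative memory overcommitment), and whether the pool
 * starts out active. A hypothetical helper summing the default footprint
 * of the active pools:
 */
static inline unsigned long ibmveth_example_default_pool_bytes(void)
{
	unsigned long bytes = 0;
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (pool_active[i])
			bytes += (unsigned long)pool_size[i] * pool_count[i];

	return bytes;
}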

#define IBM_VETH_INVALID_MAP ((u16)0xffff)

struct ibmveth_buff_pool {
	u32 size;
	u32 index;
	u32 buff_size;
	u32 threshold;
	atomic_t available;
	u32 consumer_index;
	u32 producer_index;
	u16 *free_map;
	dma_addr_t *dma_addr;
	struct sk_buff **skbuff;
	int active;
	struct kobject kobj;
};

struct ibmveth_rx_q {
	u64        index;
	u64        num_slots;
	u64        toggle;
	dma_addr_t queue_dma;
	u32        queue_len;
	struct ibmveth_rx_q_entry *queue_addr;
};

struct ibmveth_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct napi_struct napi;
	unsigned int mcastFilterSize;
	void *buffer_list_addr;
	void *filter_list_addr;
	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
	unsigned int tx_ltb_size;
	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
	dma_addr_t buffer_list_dma;
	dma_addr_t filter_list_dma;
	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
	struct ibmveth_rx_q rx_queue;
	int rx_csum;
	int large_send;
	bool is_active_trunk;

	u64 fw_ipv6_csum_support;
	u64 fw_ipv4_csum_support;
	u64 fw_large_send_support;
	/* adapter specific stats */
	u64 replenish_task_cycles;
	u64 replenish_no_mem;
	u64 replenish_add_buff_failure;
	u64 replenish_add_buff_success;
	u64 rx_invalid_buffer;
	u64 rx_no_buffer;
	u64 tx_map_failed;
	u64 tx_send_failed;
	u64 tx_large_packets;
	u64 rx_large_packets;
	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};

/*
 * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
 * so we don't need to byteswap the two elements. However, since we use
 * a union (ibmveth_buf_desc) to convert from the struct to a u64, we
 * do end up with endian-specific ordering of the elements, and that
 * needs correcting.
 */
struct ibmveth_buf_desc_fields {
#ifdef __BIG_ENDIAN
	u32 flags_len;
	u32 address;
#else
	u32 address;
	u32 flags_len;
#endif
#define IBMVETH_BUF_VALID	0x80000000
#define IBMVETH_BUF_TOGGLE	0x40000000
#define IBMVETH_BUF_LRG_SND	0x04000000
#define IBMVETH_BUF_NO_CSUM	0x02000000
#define IBMVETH_BUF_CSUM_GOOD	0x01000000
#define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
};

union ibmveth_buf_desc {
	u64 desc;
	struct ibmveth_buf_desc_fields fields;
};
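
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * build a single buffer descriptor and hand it to firmware with
 * h_add_logical_lan_buffer(). Writing the two 32-bit fields and then
 * reading back .desc relies on the endian-specific field ordering noted
 * above; the helper name is hypothetical.
 */
static inline long ibmveth_example_post_rx_buffer(unsigned long unit_address,
						  dma_addr_t dma_addr, u32 len)
{
	union ibmveth_buf_desc desc;

	desc.fields.flags_len = IBMVETH_BUF_VALID | (len & IBMVETH_BUF_LEN_MASK);
	desc.fields.address = dma_addr;

	return h_add_logical_lan_buffer(unit_address, desc.desc);
}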

struct ibmveth_rx_q_entry {
	__be32 flags_off;
#define IBMVETH_RXQ_TOGGLE		0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT	31
#define IBMVETH_RXQ_VALID		0x40000000
#define IBMVETH_RXQ_LRG_PKT		0x04000000
#define IBMVETH_RXQ_NO_CSUM		0x02000000
#define IBMVETH_RXQ_CSUM_GOOD		0x01000000
#define IBMVETH_RXQ_OFF_MASK		0x0000FFFF

	__be32 length;
	/* correlator is only used by the OS, no need to byte swap */
	u64 correlator;
};
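
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * decode one receive queue entry. flags_off and length are big endian on
 * the wire, so they must be byteswapped before applying the masks above;
 * correlator is written and read only by the OS and needs no swapping.
 * The helper name is hypothetical.
 */
static inline bool ibmveth_example_rxq_entry_valid(const struct ibmveth_rx_q_entry *e,
						   u64 expected_toggle)
{
	u32 flags = be32_to_cpu(e->flags_off);

	return (flags & IBMVETH_RXQ_VALID) &&
	       (((flags & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT) ==
		expected_toggle);
}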

#endif /* _IBMVETH_H */