/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ECORE_SRIOV_H__
#define __ECORE_SRIOV_H__

#include "ecore_status.h"
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
#include "ecore_l2.h"

#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)

/* Represents a full message. Both the request filled by the VF
 * and the response filled by the PF. The VF needs one copy
 * of this message: it fills the request part and sends it to
 * the PF. The PF will copy the response to the response part for
 * the VF to later read. The PF needs to hold a message like this
 * per VF; the request that is copied from the VF is placed in the
 * request part, and the response is filled by the PF before sending
 * it to the VF.
 */
struct ecore_vf_mbx_msg {
	union vfpf_tlvs req;
	union pfvf_tlvs resp;
};
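
/* Illustrative sketch only (not part of the driver API): a VF-side flow
 * would fill the request half of the message, hand it to the PF over the
 * mailbox, and later read the PF-written response half. The TLV field names
 * below are assumed from the companion ecore_vfpf_if.h definitions, and the
 * send step is pseudo-code:
 *
 *	struct ecore_vf_mbx_msg msg;
 *
 *	OSAL_MEMSET(&msg, 0, sizeof(msg));
 *	msg.req.acquire.first_tlv.tl.type = CHANNEL_TLV_ACQUIRE;
 *	(fill the remaining request TLVs, send the request to the PF,
 *	 wait for the PF to fill msg.resp, then parse msg.resp)
 */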

/* This mailbox is maintained per VF in its PF, and contains all the
 * information required for sending / receiving a message.
 */
struct ecore_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

#ifdef CONFIG_ECORE_SW_CHANNEL
	struct ecore_iov_sw_mbx sw_mbx;
#endif

	/* VF GPA address */
	u32 vf_addr_lo;
	u32 vf_addr_hi;

	struct vfpf_first_tlv first_tlv;	/* saved VF request header */

	u8 flags;
#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent
					 * more than one pending msg
					 */
};

#define ECORE_IOV_LEGACY_QID_RX	(0)
#define ECORE_IOV_LEGACY_QID_TX	(1)
#define ECORE_IOV_QID_INVALID	(0xFE)

struct ecore_vf_queue_cid {
	bool b_is_tx;
	struct ecore_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct ecore_vf_queue {
	/* Input from upper-layer, mapping relative queue to queue-zone */
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE		= 0,	/* VF ready to be acquired, holds no resources */
	VF_ACQUIRED	= 1,	/* VF, acquired, but not initialized */
	VF_ENABLED	= 2,	/* VF, Enabled */
	VF_RESET	= 3,	/* VF, FLR'd, pending cleanup */
	VF_STOPPED	= 4	/* VF, Stopped */
};

struct ecore_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct ecore_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct ecore_vf_info {
	struct ecore_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct ecore_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 rss_eng_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf)	(ECORE_PATH_ID(p_hwfn) ? \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;	/* Number of active vports */
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];

	/* TODO - Only windows is using it - should be removed */
	u8 was_malicious;
	u8 num_active_rxqs;
	void *ctx;
	struct ecore_public_vf_info p_vf_info;
	bool spoof_chk;		/* Currently configured on HW */
	bool req_spoofchk_val;	/* Requested value */

	/* Stores the configuration requested by VF */
	struct ecore_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define ECORE_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};
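
/* Illustrative usage sketch (not part of this header's API): given a
 * struct ecore_vf_info entry, the macros above are typically used along
 * these lines. The p_vf pointer is assumed to come from
 * ecore_iov_get_vf_info(), declared further below:
 *
 *	u8 abs_id = ECORE_VF_ABS_ID(p_hwfn, p_vf);
 *
 *	if (p_vf->configured_features & (1 << MAC_ADDR_FORCED))
 *		(a forced MAC was already published via the bulletin board)
 */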

/* This structure is part of ecore_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct ecore_pf_iov {
	struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
	u64 pending_flr[ECORE_VF_ARRAY_LENGTH];

#ifndef REMOVE_DBG
	/* This doesn't serve anything functionally, but it makes windows
	 * debugging of IOV related issues easier.
	 */
	u64 active_vfs[ECORE_VF_ARRAY_LENGTH];
#endif

	/* The message buffers are allocated contiguously and split between
	 * the VFs.
	 */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};
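
/* Layout sketch (an assumption about how the setup code slices the block;
 * the authoritative logic lives in the corresponding ecore_sriov.c): the
 * request and reply buffers above are allocated once as contiguous arrays,
 * and each VF's mailbox pointers are carved out by its relative index:
 *
 *	p_mbx->req_virt = (union vfpf_tlvs *)p_iov->mbx_msg_virt_addr + idx;
 *	p_mbx->req_phys = p_iov->mbx_msg_phys_addr +
 *			  idx * sizeof(union vfpf_tlvs);
 */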

#ifdef CONFIG_ECORE_SRIOV
/**
 * @brief Read sriov related information and allocate resources;
 *        reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
		    u8 **offset,
		    u16 type,
		    u16 length);
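
/* Usage sketch (illustrative, not a prescribed sequence): the caller keeps a
 * running offset into the reply buffer and appends TLVs back to back,
 * typically terminating the chain with a list-end TLV. The TLV type names
 * are assumed from ecore_vfpf_if.h:
 *
 *	u8 *p = (u8 *)p_mbx->reply_virt;
 *
 *	p_resp = ecore_add_tlv(p_hwfn, &p, CHANNEL_TLV_ACQUIRE,
 *			       sizeof(*p_resp));
 *	ecore_add_tlv(p_hwfn, &p, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */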

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
		       void *tlvs_list);

/**
 * @brief ecore_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt);

/**
 * @brief ecore_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_free(struct ecore_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param p_dev
 */
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);

/**
 * @brief ecore_sriov_eqe_event - handle an async sriov event that arrived
 *        on the eqe.
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
					   u8 opcode,
					   __le16 echo,
					   union event_ring_data *data);

/**
 * @brief Mark structs of vfs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return true iff at least one of the PF's VFs got FLRed; false otherwise.
 */
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
			   u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type);

/**
 * @brief ecore_iov_get_vf_info - return the database of a
 *        specific VF
 *
 * @param p_hwfn
 * @param relative_vf_id - relative id of the VF for which info
 *			   is requested
 * @param b_enabled_only - false if the caller wants to access the VF
 *			   even when it is disabled
 *
 * @return struct ecore_vf_info*
 */
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only);
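
/* A minimal lookup sketch (illustrative only): callers fetch the per-VF
 * database entry and bail out on OSAL_NULL before touching any field:
 *
 *	struct ecore_vf_info *p_vf;
 *
 *	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 *	if (p_vf == OSAL_NULL)
 *		return ECORE_INVAL;
 */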
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev *p_dev) {}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data) {return ECORE_INVAL;}
static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs) {return false;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, u16 relative_vf_id, bool b_enabled_only) {return OSAL_NULL;}

#endif
#endif /* __ECORE_SRIOV_H__ */