/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __ECORE_SRIOV_H__
#define __ECORE_SRIOV_H__

#include "ecore_status.h"
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
#include "ecore_l2.h"

#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
	(MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)

/* Represents a full message - both the request filled by the VF
 * and the response filled by the PF. The VF needs one copy
 * of this message: it fills the request part and sends it to
 * the PF. The PF copies the response into the response part for
 * the VF to later read. The PF needs to hold one message like this
 * per VF - the request that is copied from the VF is placed in the
 * request part, and the response is filled by the PF before sending
 * it to the VF.
 */
struct ecore_vf_mbx_msg {
	union vfpf_tlvs req;
	union pfvf_tlvs resp;
};
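
/* Illustrative sketch only (not part of the driver API): how the two halves
 * of ecore_vf_mbx_msg are intended to be used. Field and constant names below
 * are assumed to follow ecore_vfpf_if.h; treat this as a rough flow, not a
 * definitive recipe.
 *
 *	struct ecore_vf_mbx_msg msg;
 *
 *	OSAL_MEMSET(&msg, 0, sizeof(msg));
 *	// VF fills the request half and sends it to the PF over the mailbox
 *	msg.req.acquire.first_tlv.tl.type = CHANNEL_TLV_ACQUIRE;
 *	// ... PF handles the request and writes its answer into the
 *	//     response half, which the VF then reads ...
 *	if (msg.resp.acquire_resp.hdr.status == PFVF_STATUS_SUCCESS)
 *		;	// acquire succeeded
 */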

/* This mailbox is maintained per VF in its PF, and contains all the
 * information required for sending / receiving a message.
 */
struct ecore_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

#ifdef CONFIG_ECORE_SW_CHANNEL
	struct ecore_iov_sw_mbx sw_mbx;
#endif

	/* VF GPA address */
	u32 vf_addr_lo;
	u32 vf_addr_hi;

	struct vfpf_first_tlv first_tlv;	/* saved VF request header */

	u8 flags;
#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent
					 * more than one pending msg
					 */
};

#define ECORE_IOV_LEGACY_QID_RX	(0)
#define ECORE_IOV_LEGACY_QID_TX	(1)
#define ECORE_IOV_QID_INVALID	(0xFE)

struct ecore_vf_queue_cid {
	bool b_is_tx;
	struct ecore_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct ecore_vf_queue {
	/* Input from upper-layer, mapping relative queue to queue-zone */
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE		= 0,	/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED	= 1,	/* VF, acquired, but not initialized */
	VF_ENABLED	= 2,	/* VF, Enabled */
	VF_RESET	= 3,	/* VF, FLR'd, pending cleanup */
	VF_STOPPED	= 4	/* VF, Stopped */
};

struct ecore_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct ecore_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct ecore_vf_info {
	struct ecore_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct ecore_bulletin bulletin;
	dma_addr_t vf_bulletin;

#ifdef CONFIG_ECORE_SW_CHANNEL
	/* Whether the PF communicates with the VF over the HW or SW channel */
	bool b_hw_channel;
#endif

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 rss_eng_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf)	(ECORE_PATH_ID(p_hwfn) ?	      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;	/* Number of active vports */
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];

	/* TODO - Only windows is using it - should be removed */
	u8 was_malicious;
	u8 num_active_rxqs;
	void *ctx;
	struct ecore_public_vf_info p_vf_info;
	bool spoof_chk;		/* Currently configured on HW */
	bool req_spoofchk_val;	/* Requested value */

	/* Stores the configuration requested by VF */
	struct ecore_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define ECORE_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};
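
/* Illustrative sketch (an assumption, not driver code): the bits of
 * 'configured_features' follow the bulletin valid-map enumeration, so a
 * forced-MAC configuration could be tested roughly like this:
 *
 *	if (p_vf->configured_features & (1 << MAC_ADDR_FORCED))
 *		;	// a forced MAC from the bulletin board is already applied
 */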

/* This structure is part of ecore_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct ecore_pf_iov {
	struct ecore_vf_info vfs_array[MAX_NUM_VFS_E4];
	u64 pending_flr[ECORE_VF_ARRAY_LENGTH];

#ifndef REMOVE_DBG
	/* This doesn't serve anything functionally, but it makes windows
	 * debugging of IOV related issues easier.
	 */
	u64 active_vfs[ECORE_VF_ARRAY_LENGTH];
#endif

	/* Message addresses are allocated contiguously and split among the VFs */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

#ifdef CONFIG_ECORE_SRIOV
/**
 * @brief Read sriov related information and allocate resources,
 *        reading from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
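
/* Illustrative sketch (assumed usage, not a definitive recipe): a channel
 * message is built by chaining TLVs on a buffer, typically terminated by a
 * list-end TLV. The TLV type names and structures are assumed to come from
 * ecore_vfpf_if.h; the mailbox pointer 'p_mbx' is hypothetical.
 *
 *	struct vfpf_vport_start_tlv *p_req;
 *	u8 *offset = (u8 *)p_mbx->req_virt;
 *
 *	p_req = ecore_add_tlv(&offset, CHANNEL_TLV_VPORT_START, sizeof(*p_req));
 *	// ... fill p_req ...
 *	ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */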

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
		       void *tlvs_list);

/**
 * @brief ecore_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_setup(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_free(struct ecore_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param p_dev
 */
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);

/**
 * @brief Mark structs of vfs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLR-ed
 *
 * @return true if at least one of the PF's VFs was FLR-ed, false otherwise.
 */
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
			   u32 *disabled_vfs);
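
/* Illustrative sketch (assumed usage): 'disabled_vfs' is a bitmask covering
 * all VFs on the path, one bit per absolute VF id, usually taken from the
 * management FW's disabled-VF indication. The array size macro below is
 * hypothetical and only for illustration.
 *
 *	u32 disabled_vfs[EXAMPLE_DISABLED_VF_WORDS];	// hypothetical size
 *
 *	// ... fill disabled_vfs from the MFW indication ...
 *	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
 *		;	// at least one of this PF's VFs needs FLR cleanup
 */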

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type);

/**
 * @brief ecore_iov_get_vf_info - return the database of a
 *        specific VF
 *
 * @param p_hwfn
 * @param relative_vf_id - relative id of the VF for which info
 *                         is requested
 * @param b_enabled_only - if false, return the info even if the VF is disabled
 *
 * @return struct ecore_vf_info*
 */
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only);
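
/* Illustrative sketch (assumed usage): walking the PF's VF database with
 * ecore_iov_get_vf_info(). The loop bound 'p_dev->p_iov_info->total_vfs' and
 * the enabled-only filtering are assumptions made for the example.
 *
 *	u16 i;
 *
 *	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
 *		struct ecore_vf_info *p_vf;
 *
 *		p_vf = ecore_iov_get_vf_info(p_hwfn, i, true);
 *		if (p_vf == OSAL_NULL)
 *			continue;	// VF not enabled or invalid id
 *		// ... operate on p_vf ...
 *	}
 */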
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(u8 OSAL_UNUSED **offset, OSAL_UNUSED u16 type, OSAL_UNUSED u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev OSAL_UNUSED *p_dev) {}
static OSAL_INLINE u32 ecore_crc32(u32 OSAL_UNUSED crc, u8 OSAL_UNUSED *ptr, u32 OSAL_UNUSED length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u32 OSAL_UNUSED *disabled_vfs) {return false;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *p_tlvs_list, u16 OSAL_UNUSED req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED relative_vf_id, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}

#endif
#endif /* __ECORE_SRIOV_H__ */