1 /*******************************************************************************
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Copyright 2014 QLogic Corporation
22 * The contents of this file are subject to the terms of the
23 * QLogic End User License (the "License").
24 * You may not use this file except in compliance with the License.
25 *
26 * You can obtain a copy of the License at
27 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28 * QLogic_End_User_Software_License.txt
29 * See the License for the specific language governing permissions
30 * and limitations under the License.
31 *
32 *
33 * Module Description:
34 * This file contains functions that implement SR-IOV virtualization on
35 * the PF side
36 *
37 ******************************************************************************/
38
39 #ifdef VF_INVOLVED
40
41 #include "lm5710.h"
42 #include "lm_vf.h"
43 #include "577xx_int_offsets.h"
44 #include "command.h"
45
46 struct vf_pf_msg_hdr *lm_pf_validate_request_header(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void * virt_buffer)
47 {
48 struct vf_pf_msg_hdr * req_hdr = (struct vf_pf_msg_hdr *)virt_buffer;
49
50     if (req_hdr->resp_msg_offset > vf_info->pf_vf_response.request_size) {
51         DbgMessage(pdev, FATAL, "VF[%d]: Estimated size of incoming request(%d) exceeds buffer size(%d)\n",
52                    vf_info->relative_vf_id, req_hdr->resp_msg_offset, vf_info->pf_vf_response.request_size);
53         req_hdr = NULL;
54     }
55 return req_hdr;
56 }
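
/*
 * Note on the layout checked above: each VF's request and response share one
 * per-VF buffer (carved out in lm_pf_init_vfs()).  resp_msg_offset in the request
 * header marks where the response area begins, so it also bounds the request size;
 * a header whose resp_msg_offset exceeds the buffer is rejected by returning NULL,
 * and callers are expected to check for that before copying any data.
 */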
57
58 lm_vf_info_t * lm_pf_find_vf_info_by_rel_id(struct _lm_device_t *pdev, u16_t relative_vf_id)
59 {
60 lm_vf_info_t * vf_info = NULL;
61 if (relative_vf_id < pdev->vfs_set.number_of_enabled_vfs) {
62 vf_info = &pdev->vfs_set.vfs_array[relative_vf_id];
63 } else {
64 DbgMessage(pdev, FATAL, "lm_pf_find_vf_info_by_rel_id: VF[%d] is not enabled\n", relative_vf_id);
65 }
66 return vf_info;
67 }
68
69 lm_vf_info_t * lm_pf_find_vf_info_by_abs_id(struct _lm_device_t *pdev, u8_t abs_vf_id)
70 {
71 lm_vf_info_t * vf_info = NULL;
72 u16_t relative_vf_id = 0xFFFF;
73 DbgMessage(pdev, WARN, "lm_pf_find_vf_info_by_abs_id: abs_vf_id:%d(%d)\n",abs_vf_id,pdev->hw_info.sriov_info.first_vf_in_pf);
74 if (abs_vf_id < pdev->hw_info.sriov_info.first_vf_in_pf) {
75 DbgBreak();
76 }
77 relative_vf_id = abs_vf_id - (u8_t)pdev->hw_info.sriov_info.first_vf_in_pf;
78 if (relative_vf_id < pdev->vfs_set.number_of_enabled_vfs) {
79 vf_info = &pdev->vfs_set.vfs_array[relative_vf_id];
80 } else {
81 DbgMessage(pdev, FATAL, "lm_pf_find_vf_info_by_abs_id: VF[a:%d,r:%d] is not enabled\n",abs_vf_id,relative_vf_id);
82 }
83 return vf_info;
84 }
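
/*
 * Relative VF ids are zero-based per PF; the absolute id used elsewhere in this
 * file (IGU mapping entries, FW function id "8 + abs_vf_id") is
 * relative_vf_id + hw_info.sriov_info.first_vf_in_pf, which is the inverse of the
 * translation performed above.
 */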
85
86 lm_status_t lm_pf_download_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void* virt_buffer, u32_t length)
87 {
88 lm_status_t lm_status = LM_STATUS_SUCCESS;
89 struct vf_pf_msg_hdr * requst_hdr = NULL;
90
91 if(!(pdev && vf_info && virt_buffer)) {
92 DbgMessage(pdev, FATAL, "PFVF request with invalid parameters: %p, %p, %p, d\n", pdev,vf_info,virt_buffer,length);
93 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
94 return LM_STATUS_INVALID_PARAMETER;
95 }
96
97 if ((vf_info->pf_vf_response.req_resp_state != VF_PF_WAIT_FOR_START_REQUEST)
98 && (vf_info->pf_vf_response.req_resp_state != VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST)) {
99 DbgMessage(pdev, FATAL, "VF[%d] does not expect PFVF request (%d)\n", vf_info->relative_vf_id, vf_info->pf_vf_response.req_resp_state);
100 return LM_STATUS_FAILURE;
101 }
102 if (vf_info->pf_vf_response.req_resp_state == VF_PF_WAIT_FOR_START_REQUEST) {
103 //requst_hdr = (struct vf_pf_msg_hdr *)virt_buffer;
104 if (length >= sizeof(struct vf_pf_msg_hdr)) {
105 requst_hdr = lm_pf_validate_request_header(pdev, vf_info, virt_buffer);
106 if (requst_hdr != NULL) {
107 vf_info->pf_vf_response.request_offset = 0;
108 }
109 } else {
110 DbgMessage(pdev, FATAL, "VF[%d] received too short(%d) PFVF request\n", vf_info->relative_vf_id, length);
111 }
112 } else {
113 requst_hdr = (struct vf_pf_msg_hdr *)vf_info->pf_vf_response.request_virt_addr;
114 }
115
116 if (requst_hdr != NULL) {
117 if (length <= (vf_info->pf_vf_response.request_size - vf_info->pf_vf_response.request_offset)) {
118 mm_memcpy((u8_t*)vf_info->pf_vf_response.request_virt_addr + vf_info->pf_vf_response.request_offset, virt_buffer, length);
119 DbgMessage(pdev, WARN, "VF[%d]: lm_pf_download_standard_request: %d bytes from offset %d\n", vf_info->relative_vf_id,
120 length, vf_info->pf_vf_response.request_offset);
121 if (requst_hdr->resp_msg_offset > (vf_info->pf_vf_response.request_offset + length)) {
122 lm_status = LM_STATUS_PENDING;
123 vf_info->pf_vf_response.request_offset += length;
124 vf_info->pf_vf_response.req_resp_state = VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST;
125 } else {
126 vf_info->pf_vf_response.response_virt_addr = (u8_t*)vf_info->pf_vf_response.request_virt_addr + requst_hdr->resp_msg_offset;
127 vf_info->pf_vf_response.request_offset = 0;
128 vf_info->pf_vf_response.req_resp_state = VF_PF_REQUEST_IN_PROCESSING;
129 }
130 } else {
131 lm_status = LM_STATUS_INVALID_PARAMETER;
132 vf_info->pf_vf_response.req_resp_state = VF_PF_WAIT_FOR_START_REQUEST;
133 }
134 } else {
135 lm_status = LM_STATUS_INVALID_PARAMETER;
136 }
137 return lm_status;
138 }
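
/*
 * Illustrative sketch (not part of the driver build): how a caller might drive the
 * chunked request download above.  The "chunk"/"chunk_len" source is hypothetical;
 * only the LM_STATUS_PENDING / LM_STATUS_SUCCESS convention comes from the function.
 */
#if 0
    lm_status_t lm_status;
    do {
        /* chunk/chunk_len are assumed to arrive from the VF-PF channel */
        lm_status = lm_pf_download_standard_request(pdev, vf_info, chunk, chunk_len);
    } while (lm_status == LM_STATUS_PENDING);    /* more chunks expected */
    if (lm_status == LM_STATUS_SUCCESS) {
        /* req_resp_state is now VF_PF_REQUEST_IN_PROCESSING: the full request sits at
         * request_virt_addr and the response should be built at response_virt_addr. */
    }
#endif
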
139 lm_status_t lm_pf_upload_standard_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void* virt_buffer, u32_t length)
140 {
141 lm_status_t lm_status = LM_STATUS_SUCCESS;
142 u32_t response_rest;
143
144 if(!(pdev && vf_info && virt_buffer)) {
145 DbgMessage(pdev, FATAL, "PFVF rresponse with invalid parameters: %p, %p, %p, d\n", pdev,vf_info,virt_buffer,length);
146 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
147 return LM_STATUS_INVALID_PARAMETER;
148 }
149
150 if (length < sizeof(struct pf_vf_msg_resp))
151 {
152 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
153 }
154
155 switch (vf_info->pf_vf_response.req_resp_state) {
156 case VF_PF_WAIT_FOR_START_REQUEST:
157 case VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST:
158 DbgMessage(pdev, WARN, "VF[%d]:lm_pf_upload_standard_response (LM_STATUS_FAILURE)\n",vf_info->relative_vf_id);
159 lm_status = LM_STATUS_FAILURE;
160 break;
161 case VF_PF_REQUEST_IN_PROCESSING:
162 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
163 if (length > sizeof(struct pf_vf_msg_resp))
164 {
165 length = sizeof(struct pf_vf_msg_resp);
166 }
167 mm_memcpy(virt_buffer, vf_info->pf_vf_response.response_virt_addr, length);
168 break;
169 case VF_PF_RESPONSE_READY:
170 response_rest = vf_info->pf_vf_response.response_size - vf_info->pf_vf_response.response_offset;
171 if (length <= response_rest) {
172 vf_info->pf_vf_response.req_resp_state = VF_PF_WAIT_FOR_START_REQUEST;
173 } else {
174 length = response_rest;
175 }
176 mm_memcpy(virt_buffer, (u8_t*)vf_info->pf_vf_response.response_virt_addr + vf_info->pf_vf_response.response_offset, length);
177 DbgMessage(pdev, WARN, "VF[%d]:lm_pf_upload_standard_response: %d bytes from offset %d\n",vf_info->relative_vf_id,length,
178 vf_info->pf_vf_response.response_offset);
179 vf_info->pf_vf_response.response_offset += length;
180 if (vf_info->pf_vf_response.response_offset == vf_info->pf_vf_response.response_size)
181 {
182 vf_info->pf_vf_response.req_resp_state = VF_PF_WAIT_FOR_START_REQUEST;
183 }
184 break;
185 default:
186 DbgBreak();
187
188 }
189
190 return lm_status;
191 }
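
/*
 * The response is streamed back in chunks as well: while the request is still in
 * processing only the fixed pf_vf_msg_resp header is returned, and once the state is
 * VF_PF_RESPONSE_READY the remaining response is copied out from response_offset
 * until the state machine returns to VF_PF_WAIT_FOR_START_REQUEST.
 */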
192
193 lm_status_t lm_pf_upload_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, lm_address_t * phys_buffer, u32_t length)
194 {
195 lm_status_t lm_status = LM_STATUS_FAILURE;
196 DbgMessage(pdev, WARN, "lm_pf_upload_standard_request is not implemented yet\n");
197 return lm_status;
198 }
199
200 lm_status_t lm_pf_allocate_vfs(struct _lm_device_t *pdev)
201 {
202 lm_status_t lm_status = LM_STATUS_SUCCESS;
203 u8_t mm_cli_idx = 0;
204 u32_t alloc_size = 0;
205 u16_t num_vfs = 0;
206
207 if CHK_NULL(pdev)
208 {
209 return LM_STATUS_INVALID_PARAMETER ;
210 }
211
212 mm_cli_idx = LM_RESOURCE_COMMON;
213 num_vfs = pdev->hw_info.sriov_info.total_vfs;
214
215 pdev->vfs_set.number_of_enabled_vfs = 0;
216 if (!num_vfs) {
217 DbgMessage(pdev, WARN, "lm_pf_allocate_vfs: SRIOV capability is not found\n");
218 return LM_STATUS_FAILURE;
219 } else {
220 DbgMessage(pdev, WARN, "lm_pf_allocate_vfs for %d VFs\n",num_vfs);
221 }
222 alloc_size = sizeof(lm_vf_info_t) * num_vfs;
223
224 pdev->vfs_set.vfs_array = mm_alloc_mem(pdev, alloc_size, mm_cli_idx);
225 if CHK_NULL(pdev->vfs_set.vfs_array)
226 {
227 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
228 return LM_STATUS_RESOURCE ;
229 }
230 mm_mem_zero(pdev->vfs_set.vfs_array, alloc_size ) ;
231 pdev->vfs_set.req_resp_size = (((sizeof(union vf_pf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
232 + ((sizeof(union pf_vf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)) * num_vfs;
233 pdev->vfs_set.req_resp_virt_addr = mm_alloc_phys_mem(pdev, pdev->vfs_set.req_resp_size,
234 &pdev->vfs_set.req_resp_phys_addr, 0, LM_RESOURCE_COMMON);
235 if CHK_NULL(pdev->vfs_set.req_resp_virt_addr)
236 {
237 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
238 return LM_STATUS_RESOURCE;
239 }
240
241 pdev->vfs_set.pf_fw_stats_set_data_sz = ((sizeof(struct per_queue_stats) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK) * num_vfs;
242 pdev->vfs_set.pf_fw_stats_set_virt_data = mm_alloc_phys_mem(pdev, pdev->vfs_set.pf_fw_stats_set_data_sz,
243 &pdev->vfs_set.pf_fw_stats_set_phys_data, 0, LM_RESOURCE_COMMON);
244 if CHK_NULL(pdev->vfs_set.pf_fw_stats_set_virt_data)
245 {
246 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
247 return LM_STATUS_RESOURCE;
248 }
249
250 alloc_size = sizeof(lm_stats_fw_t) * num_vfs;
251 pdev->vfs_set.mirror_stats_fw_set = mm_alloc_mem(pdev, alloc_size, mm_cli_idx);
252 if CHK_NULL(pdev->vfs_set.mirror_stats_fw_set)
253 {
254 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
255 return LM_STATUS_RESOURCE ;
256 }
257
258 pdev->vfs_set.rss_update_size = ((sizeof(struct eth_rss_update_ramrod_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK) * num_vfs;
259 pdev->vfs_set.rss_update_virt_addr = mm_alloc_phys_mem(pdev, pdev->vfs_set.rss_update_size,
260 &pdev->vfs_set.rss_update_phys_addr, 0, LM_RESOURCE_COMMON);
261 if CHK_NULL(pdev->vfs_set.rss_update_virt_addr)
262 {
263 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
264 return LM_STATUS_RESOURCE;
265 }
266
267 if (pdev->hw_info.sriov_info.sriov_control & 0x0001) {
268 lm_status = lm_pf_init_vfs(pdev, pdev->hw_info.sriov_info.num_vfs);
269 DbgMessage(pdev, WARN, "SRIOV enable(after FLR): init %d VFs: status %d\n",pdev->hw_info.sriov_info.num_vfs,lm_status);
270 if(lm_status != LM_STATUS_SUCCESS) {
271 DbgBreak();
272 return lm_status;
273 } else {
274 u16_t vf_idx;
275 DbgMessage(pdev, WARN, "lm_pf_init_vfs returns OK\n");
276 for (vf_idx = 0; vf_idx < pdev->hw_info.sriov_info.num_vfs; vf_idx++) {
277 #if 0
278 lm_status = lm_pf_enable_vf(pdev, pdev->hw_info.sriov_info.first_vf_in_pf + vf_idx);
279 if(lm_status != LM_STATUS_SUCCESS) {
280 DbgMessage(pdev, WARN, "SRIOV enable(after FLR): enable VF[%d]: status %d\n",vf_idx,lm_status);
281 DbgBreak();
282 return lm_status;
283 }
284 #endif
285 }
286 }
287 }
288 return lm_status;
289 }
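
/*
 * The "(size + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK" expression used above
 * rounds each per-VF structure up to a whole cache line so that two VFs never share
 * one.  For example, assuming a 64-byte cache line (CACHE_LINE_SIZE_MASK == 0x3F),
 * a 100-byte structure is padded to 128 bytes: (100 + 63) & ~63 == 128.
 */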
290
291 lm_status_t lm_pf_init_vfs(struct _lm_device_t *pdev, u16_t num_vfs)
292 {
293 lm_address_t mem_phys;
294 u8_t * mem_virt;
295 lm_status_t lm_status = LM_STATUS_SUCCESS;
296 u32_t req_resp_size;
297 u32_t stats_size;
298 u32_t rss_upd_size;
299 u16_t vf_idx = 0;
300
301 DbgBreakIf(!(pdev && num_vfs && pdev->vfs_set.vfs_array && pdev->vfs_set.req_resp_virt_addr && pdev->vfs_set.pf_fw_stats_set_virt_data));
302 MM_ACQUIRE_VFS_STATS_LOCK(pdev);
303 pdev->vfs_set.number_of_enabled_vfs = 0;
304 mm_mem_zero(pdev->vfs_set.vfs_array, sizeof(lm_vf_info_t)*num_vfs);
305 mm_mem_zero(pdev->vfs_set.mirror_stats_fw_set, sizeof(lm_stats_fw_t)*num_vfs);
306
307 req_resp_size = ((sizeof(union vf_pf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
308 + ((sizeof(union pf_vf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK);
309 mem_phys = pdev->vfs_set.req_resp_phys_addr;
310 mem_virt = pdev->vfs_set.req_resp_virt_addr;
311
312 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
313 pdev->vfs_set.vfs_array[vf_idx].pf_vf_response.response_phys_addr = mem_phys;
314 LM_INC64(&mem_phys, req_resp_size);
315 pdev->vfs_set.vfs_array[vf_idx].pf_vf_response.request_virt_addr = mem_virt;
316 mem_virt += req_resp_size;
317 pdev->vfs_set.vfs_array[vf_idx].pf_vf_response.request_size = req_resp_size;
318 pdev->vfs_set.vfs_array[vf_idx].pf_vf_response.req_resp_state = VF_PF_WAIT_FOR_START_REQUEST;
319 pdev->vfs_set.vfs_array[vf_idx].relative_vf_id = (u8_t)vf_idx;
320 pdev->vfs_set.vfs_array[vf_idx].abs_vf_id = (u8_t)(vf_idx + pdev->hw_info.sriov_info.first_vf_in_pf);
321 }
322
323 stats_size = (sizeof(struct per_queue_stats) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
324 mem_phys = pdev->vfs_set.pf_fw_stats_set_phys_data;
325 mem_virt = pdev->vfs_set.pf_fw_stats_set_virt_data;
326 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
327 pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_phys_data = mem_phys;
328 LM_INC64(&mem_phys, stats_size);
329 pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data = (struct per_queue_stats *)mem_virt;
330 mem_virt += stats_size;
331 pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw = pdev->vfs_set.mirror_stats_fw_set + sizeof(lm_stats_fw_t) * vf_idx;
332 }
333
334 rss_upd_size = (sizeof(struct eth_rss_update_ramrod_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
335 mem_phys = pdev->vfs_set.rss_update_phys_addr;
336 mem_virt = pdev->vfs_set.rss_update_virt_addr;
337 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
338 pdev->vfs_set.vfs_array[vf_idx].vf_slowpath_info.slowpath_data.rss_rdata_phys = mem_phys;
339 LM_INC64(&mem_phys, rss_upd_size);
340 pdev->vfs_set.vfs_array[vf_idx].vf_slowpath_info.slowpath_data.rss_rdata = (struct eth_rss_update_ramrod_data *)mem_virt;
341 mem_virt += rss_upd_size;
342 }
343 pdev->vfs_set.number_of_enabled_vfs = num_vfs;
344 mm_mem_zero(pdev->pf_resources.free_sbs,sizeof(pdev->pf_resources.free_sbs));
345 mm_mem_zero(pdev->pf_resources.free_fw_clients,sizeof(pdev->pf_resources.free_fw_clients));
346 mm_mem_zero(pdev->pf_resources.free_sw_clients,sizeof(pdev->pf_resources.free_sw_clients));
347 MM_RELEASE_VFS_STATS_LOCK(pdev);
348 return lm_status;
349 }
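
/*
 * lm_pf_init_vfs() carves the three physically contiguous areas allocated in
 * lm_pf_allocate_vfs() (request/response channel, FW statistics buffers and RSS
 * ramrod data) into per-VF, cache-line aligned slices and records them in each
 * lm_vf_info_t, so a VF index is all that is needed to locate its buffers later on.
 */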
350
351 #if 0
352 lm_status_t lm_pf_clear_vfs(struct _lm_device_t * pf_dev)
353 {
354 /* TODO: Clean VF Database for FLR needs? */
355 lm_status_t lm_status = LM_STATUS_SUCCESS;
356 u32_t base_vfid, vfid;
357 u16_t pretend_val;
358 u16_t ind_cids, start_cid, end_cid;
359
360 DbgMessage(pf_dev, FATAL, "vf disable\n");
361 start_cid = (((1 << LM_VF_MAX_RVFID_SIZE) | 0) << LM_VF_CID_WND_SIZE); //1st possible abs VF_ID
362 end_cid = (((1 << LM_VF_MAX_RVFID_SIZE) | 63) << LM_VF_CID_WND_SIZE); //last possible abs VF_ID
363 DbgMessage(pf_dev, FATAL, "vf disable: clear VFs connections from %d till %d\n",start_cid, end_cid);
364 for (ind_cids = MAX_ETH_CONS; ind_cids < ETH_MAX_RX_CLIENTS_E2; ind_cids++) {
365 pf_dev->vars.connections[ind_cids].con_state = LM_CON_STATE_CLOSE;
366 }
367
368 if (lm_is_function_after_flr(pf_dev)) {
369 pf_dev->vfs_set.number_of_enabled_vfs = 0;
370 DbgMessage(pf_dev, FATAL, "vf disable called on a flred function - not much we can do here... \n");
371 return LM_STATUS_SUCCESS;
372 }
373 /* if MCP does not exist for each vf in pf, need to pretend to it and disable igu vf_msix and internal vfid enable bit */
374 if (GET_FLAGS( pf_dev->params.test_mode, TEST_MODE_NO_MCP)){
375 DbgMessage(pf_dev, FATAL, "bootcode is down fix sriov disable.\n");
376 base_vfid = pf_dev->hw_info.sriov_info.first_vf_in_pf;
377 for (vfid = base_vfid; vfid < base_vfid + pf_dev->vfs_set.number_of_enabled_vfs; vfid++ ) {
378 pretend_val = ABS_FUNC_ID(pf_dev) | (1<<3) | (vfid << 4);
379 lm_pretend_func(pf_dev, pretend_val);
380
381 REG_WR(pf_dev, IGU_REG_PCI_VF_MSIX_EN, 0);
382 REG_WR(pf_dev, IGU_REG_PCI_VF_MSIX_FUNC_MASK, 0);
383 REG_WR(pf_dev, PGLUE_B_REG_INTERNAL_VFID_ENABLE, 0);
384
385 lm_pretend_func(pf_dev, ABS_FUNC_ID(pf_dev) );
386 }
387
388 /* This is a clear-on-write register, therefore we actually write 1 to the bit we want to reset */
389 REG_WR(pf_dev, 0x24d8, 1<<29);
390
391 REG_WR(pf_dev, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR ,(1<<ABS_FUNC_ID(pf_dev)));
392 //REG_WR(pf_dev, PGLUE_B_REG_DISABLE_FLR_SRIOV_DISABLED, PGLUE_B_DISABLE_FLR_SRIOV_DISABLED_REG_DISABLE_SRIOV_DISABLED_REQUEST);*/
393 }
394 pf_dev->vfs_set.number_of_enabled_vfs = 0;
395 return lm_status;
396 }
397 #endif
398
399 lm_status_t lm_pf_set_vf_ctx(struct _lm_device_t *pdev, u16_t vf_id, void* ctx)
400 {
401 lm_status_t lm_status = LM_STATUS_SUCCESS;
402 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
403 DbgBreakIf(!vf_info);
404 if (vf_info != NULL) {
405 vf_info->um_ctx = ctx;
406 vf_info->vf_si_state = PF_SI_WAIT_FOR_ACQUIRING_REQUEST;
407 vf_info->pf_vf_response.req_resp_state = VF_PF_WAIT_FOR_START_REQUEST;
408 } else {
409 lm_status = LM_STATUS_FAILURE;
410 }
411 return lm_status;
412 }
413
414 lm_status_t lm_pf_set_vf_stat_id(struct _lm_device_t *pdev,
415 u16_t vf_id,
416 u8_t base_fw_stats_id)
417 {
418 lm_status_t lm_status = LM_STATUS_SUCCESS;
419 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
420 DbgBreakIf(!vf_info);
421 if (vf_info != NULL) {
422 vf_info->base_fw_stats_id = base_fw_stats_id;
423 DbgMessage(pdev, WARN, "VF[%d]: Stat ID: %d(FW)\n", vf_id, base_fw_stats_id);
424 } else {
425 lm_status = LM_STATUS_FAILURE;
426 }
427 return lm_status;
428 }
429
430 u8_t lm_pf_is_vf_mac_set(struct _lm_device_t *pdev, u16_t vf_id)
431 {
432 u8_t is_mac_set = FALSE;
433 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
434 DbgBreakIf(!vf_info);
435 if (vf_info != NULL) {
436 is_mac_set = vf_info->is_mac_set;
437 }
438 return is_mac_set;
439 }
440
441 lm_status_t lm_pf_set_vf_base_cam_idx(struct _lm_device_t *pdev, u16_t vf_id, u32_t base_cam_idx)
442 {
443 lm_status_t lm_status = LM_STATUS_SUCCESS;
444 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
445 DbgBreakIf(!vf_info);
446 if (vf_info != NULL) {
447 vf_info->base_cam_offset = base_cam_idx;
448 } else {
449 lm_status = LM_STATUS_FAILURE;
450 }
451 return lm_status;
452 }
453
454 u32_t lm_pf_get_sw_client_idx_from_cid(struct _lm_device_t *pdev, u32_t cid)
455 {
456 u32_t client_info_idx = 0xFFFFFFFF;
457 u8_t abs_vf_id = 0xff;
458 u8_t vf_q_id = 0xff;
459 lm_vf_info_t * vf_info = NULL;
460
461 DbgBreakIf(!IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev));
462
463 /* Either MP is disabled OR enabled but not a tx-only connection */
464 if (cid < MAX_RX_CHAIN(pdev))
465 {
466 client_info_idx = cid;
467 }
468 else
469 {
470 abs_vf_id = GET_ABS_VF_ID_FROM_PF_CID(cid);
471 vf_q_id = GET_VF_Q_ID_FROM_PF_CID(cid);
472 vf_info = lm_pf_find_vf_info_by_abs_id(pdev, abs_vf_id);
473 DbgBreakIf(!vf_info);
474 client_info_idx = LM_SW_VF_CLI_ID(vf_info, vf_q_id);
475 }
476 return client_info_idx;
477 }
478
479 u32_t lm_pf_get_fw_client_idx_from_cid(struct _lm_device_t *pdev, u32_t cid)
480 {
481 u32_t client_info_idx = 0xFFFFFFFF;
482 u8_t abs_vf_id = 0xff;
483 u8_t vf_q_id = 0xff;
484 lm_vf_info_t * vf_info = NULL;
485
486 DbgBreakIf(!IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev));
487
488 if (cid < MAX_RX_CHAIN(pdev)) {
489 client_info_idx = LM_FW_CLI_ID(pdev,cid);
490 } else {
491 abs_vf_id = GET_ABS_VF_ID_FROM_PF_CID(cid);
492 vf_q_id = GET_VF_Q_ID_FROM_PF_CID(cid);
493 vf_info = lm_pf_find_vf_info_by_abs_id(pdev, abs_vf_id);
494 DbgBreakIf(!vf_info);
495 client_info_idx = LM_FW_VF_CLI_ID(vf_info, vf_q_id);
496 }
497 return client_info_idx;
498 }
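
/*
 * PF-side CIDs above MAX_RX_CHAIN(pdev) belong to VF queues: the absolute VF id and
 * the queue index inside that VF are encoded in the CID and recovered here with
 * GET_ABS_VF_ID_FROM_PF_CID()/GET_VF_Q_ID_FROM_PF_CID() before the per-VF client
 * index is computed.
 */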
499
500 u8_t lm_vf_get_free_resource(u32_t * resource, u8_t min_num, u8_t max_num, u8_t num)
501 {
502 u8_t i,j;
503 u8_t base_value = 0xff;
504
505 for (i = min_num; i <= (max_num - num); i++) {
506 u8_t ind,offset;
507 for (j = 0; j < num; j++) {
508 ind = (i + j) / ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
509 offset = (i+j) % ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
510 if (resource[ind] & (1 << offset)) {
511 break;
512 }
513 }
514 if (j == num) {
515 base_value = i;
516 break;
517 }
518 }
519 return base_value;
520 }
521
522 void lm_vf_acquire_resource(u32_t * presource, u8_t base_value, u8_t num)
523 {
524 int i,ind,offset;
525 for (i = base_value; i < (base_value + num); i++) {
526 ind = i / ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
527 offset = i % ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
528 presource[ind] |= (1 << offset);
529 }
530
531 return;
532 }
533
534 u8_t lm_vf_get_resource_value(u32_t * presource, u8_t base_value)
535 {
536 u8_t value;
537 int ind,offset;
538
539 ind = base_value / ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
540 offset = base_value % ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
541 value = presource[ind] & (1 << offset);
542
543 return value;
544 }
545
546 void lm_vf_release_resource(u32_t * presource, u8_t base_value, u8_t num)
547 {
548 int i,ind,offset;
549 for (i = base_value; i < (base_value + num); i++) {
550 ind = i / ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
551 offset = i % ELEM_OF_RES_ARRAY_SIZE_IN_BITS;
552 presource[ind] &= ~(1 << offset);
553 }
554
555 return;
556 }
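
/*
 * Illustrative sketch (not part of the driver build): typical use of the bitmap
 * helpers above.  lm_vf_get_free_resource() finds "num" consecutive clear bits
 * starting at or above min_num, lm_vf_acquire_resource() marks them busy and
 * lm_vf_release_resource() frees them again.  The local "bitmap" array here is
 * hypothetical; the real driver operates on pdev->pf_resources.free_sbs and friends.
 */
#if 0
    u32_t bitmap[4] = {0};                              /* 128 resource bits, all free */
    u8_t  base;

    base = lm_vf_get_free_resource(bitmap, 8, 32, 4);   /* 4 free bits in [8,32) */
    if (base != 0xFF) {
        lm_vf_acquire_resource(bitmap, base, 4);        /* mark them busy */
        /* ... use resources base..base+3 ... */
        lm_vf_release_resource(bitmap, base, 4);        /* return them to the pool */
    }
#endif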
557
558 #ifndef ARRAY_SIZE
559 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
560 #endif
561
562 u8_t lm_pf_acquire_vf_chains_resources(struct _lm_device_t *pdev, u16_t vf_id, u32_t num_chains)
563 {
564 u32_t chain_idx;
565 u8_t min_ndsb;
566 u8_t min_fw_client, current_fw_client;
567 u8_t min_sw_client = MAX_RX_CHAIN(pdev);
568 u8_t client_info_entries;
569
570 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
571
572 MM_ACQUIRE_PF_LOCK(pdev);
573 vf_info->num_allocated_chains = 0;
574 min_ndsb = pdev->params.max_pf_sb_cnt;
575 min_fw_client = pdev->params.max_pf_fw_client_cnt;
576 DbgBreakIf(pdev->params.fw_client_cnt <= pdev->params.max_pf_fw_client_cnt);
577 client_info_entries = pdev->params.fw_client_cnt;
578
579 if (min_sw_client < pdev->params.max_pf_fw_client_cnt)
580 {
581 min_sw_client = pdev->params.max_pf_fw_client_cnt;
582 }
583 for (chain_idx = 0; chain_idx < num_chains; chain_idx++) {
584 vf_info->vf_chains[chain_idx].sw_ndsb = lm_vf_get_free_resource(pdev->pf_resources.free_sbs, min_ndsb,
585 pdev->params.fw_sb_cnt, 1);
586 if (vf_info->vf_chains[chain_idx].sw_ndsb == 0xFF) {
587 DbgMessage(pdev, FATAL, "No SBs from %d to %d\n",min_ndsb,pdev->params.fw_sb_cnt);
588 break;
589 }
590 vf_info->vf_chains[chain_idx].fw_ndsb = LM_FW_SB_ID(pdev,vf_info->vf_chains[chain_idx].sw_ndsb);
591 min_ndsb = vf_info->vf_chains[chain_idx].sw_ndsb + 1;
592 #if 0
593 current_fw_client = lm_vf_get_free_resource(pdev->pf_resources.free_fw_clients, min_fw_client,
594 pdev->params.fw_client_cnt, 1);
595 if (current_fw_client == 0xFF) {
596
597 DbgMessage(pdev, FATAL, "No FW Clients from %d to %d\n",min_fw_client,pdev->params.fw_client_cnt);
598 break;
599 }
600 #endif
601 current_fw_client = vf_info->vf_chains[chain_idx].sw_client_id = lm_vf_get_free_resource(pdev->pf_resources.free_sw_clients, min_sw_client, client_info_entries, 1);
602 if (vf_info->vf_chains[chain_idx].sw_client_id == 0xFF) {
603
604 DbgMessage(pdev, FATAL, "No Clients from %d to %d\n",min_sw_client,client_info_entries);
605 break;
606 }
607
608 vf_info->vf_chains[chain_idx].fw_client_id = LM_FW_CLI_ID(pdev,current_fw_client);
609 vf_info->vf_chains[chain_idx].fw_qzone_id = LM_FW_DHC_QZONE_ID(pdev, vf_info->vf_chains[chain_idx].sw_ndsb);
610
611 min_fw_client = current_fw_client + 1;
612 min_sw_client = vf_info->vf_chains[chain_idx].sw_client_id + 1;
613 vf_info->num_allocated_chains++;
614 }
615 if (vf_info->num_allocated_chains) {
616 for (chain_idx = 0; chain_idx < vf_info->num_allocated_chains; chain_idx++) {
617 lm_vf_acquire_resource(pdev->pf_resources.free_sbs, vf_info->vf_chains[chain_idx].sw_ndsb, 1);
618 lm_vf_acquire_resource(pdev->pf_resources.free_fw_clients, vf_info->vf_chains[chain_idx].fw_client_id - pdev->params.base_fw_client_id, 1);
619 lm_vf_acquire_resource(pdev->pf_resources.free_sw_clients, vf_info->vf_chains[chain_idx].sw_client_id, 1);
620 DbgMessage(pdev, WARN, "VF[%d(rel)] received resourses for chain %d: SW_NDSB=%d, FW_CLIENT_ID=%d, SW_CLIENT_ID=%d\n",
621 vf_id,
622 chain_idx,
623 vf_info->vf_chains[chain_idx].sw_ndsb,
624 vf_info->vf_chains[chain_idx].fw_client_id - pdev->params.base_fw_client_id,
625 vf_info->vf_chains[chain_idx].sw_client_id);
626 }
627 }
628
629 MM_RELEASE_PF_LOCK(pdev);
630 return vf_info->num_allocated_chains;
631 }
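
/*
 * Each VF chain is assigned one free status block, one FW client id and one SW client
 * info entry from the PF-wide bitmaps, starting above the PF's own range; bits are
 * only flipped to "busy" for chains that were fully satisfied, and the number of such
 * chains is returned so the caller can detect a partial allocation.
 */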
632
633 void lm_pf_release_vf_chains_resources(struct _lm_device_t *pdev, u16_t vf_id)
634 {
635 u8_t num_chains, chain_idx;
636 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
637
638 num_chains = vf_info->num_allocated_chains;
639 if (!vf_info->was_malicious)
640 {
641 MM_ACQUIRE_PF_LOCK(pdev);
642 for (chain_idx = 0; chain_idx < num_chains; chain_idx++)
643 {
644 lm_vf_release_resource(pdev->pf_resources.free_sbs, vf_info->vf_chains[chain_idx].sw_ndsb, 1);
645 lm_vf_release_resource(pdev->pf_resources.free_fw_clients, vf_info->vf_chains[chain_idx].fw_client_id - pdev->params.base_fw_client_id, 1);
646 lm_vf_release_resource(pdev->pf_resources.free_sw_clients, vf_info->vf_chains[chain_idx].sw_client_id, 1);
647 }
648 MM_RELEASE_PF_LOCK(pdev);
649 }
650 return;
651 }
652
653 void lm_pf_release_separate_vf_chain_resources(struct _lm_device_t *pdev, u16_t vf_id, u8_t chain_num)
654 {
655 lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_rel_id(pdev, vf_id);
656
657 if (!vf_info->was_malicious)
658 {
659 if (chain_num < vf_info->num_allocated_chains)
660 {
661 MM_ACQUIRE_PF_LOCK(pdev);
662 lm_vf_release_resource(pdev->pf_resources.free_sbs, vf_info->vf_chains[chain_num].sw_ndsb, 1);
663 lm_vf_release_resource(pdev->pf_resources.free_fw_clients, vf_info->vf_chains[chain_num].fw_client_id - pdev->params.base_fw_client_id, 1);
664 lm_vf_release_resource(pdev->pf_resources.free_sw_clients, vf_info->vf_chains[chain_num].sw_client_id, 1);
665 MM_RELEASE_PF_LOCK(pdev);
666 }
667 }
668 return;
669 }
670
671 void lm_pf_init_vf_client(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t q_id)
672 {
673
674 ecore_init_mac_obj(pdev,
675 &pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_id)].mac_obj,
676 LM_FW_VF_CLI_ID(vf_info,q_id),
677 LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_id),
678 FUNC_ID(pdev),
679 LM_SLOWPATH(pdev, mac_rdata)[LM_CLI_IDX_NDIS],
680 LM_SLOWPATH_PHYS(pdev, mac_rdata)[LM_CLI_IDX_NDIS],
681 ECORE_FILTER_MAC_PENDING,
682 (unsigned long *)&pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_id)].sp_mac_state,
683 ECORE_OBJ_TYPE_RX_TX,
684 &pdev->slowpath_info.macs_pool);
685
686 if (!CHIP_IS_E1(pdev))
687 {
688 ecore_init_vlan_mac_obj(pdev,
689 &pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_id)].mac_vlan_obj,
690 LM_FW_VF_CLI_ID(vf_info,q_id),
691 LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_id),
692 FUNC_ID(pdev),
693 LM_SLOWPATH(pdev, mac_rdata)[LM_CLI_IDX_NDIS],
694 LM_SLOWPATH_PHYS(pdev, mac_rdata)[LM_CLI_IDX_NDIS],
695 ECORE_FILTER_VLAN_MAC_PENDING,
696 (unsigned long *)&pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_id)].sp_mac_state,
697 ECORE_OBJ_TYPE_RX_TX,
698 &pdev->slowpath_info.macs_pool,
699 &pdev->slowpath_info.vlans_pool);
700 }
701
702 return;
703 }
704
705 void lm_pf_init_vf_slow_path(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
706 {
707
708 ecore_init_rss_config_obj(pdev,
709 &vf_info->vf_slowpath_info.rss_conf_obj,
710 LM_FW_VF_CLI_ID(vf_info, LM_SW_LEADING_RSS_CID(pdev)),
711 LM_VF_Q_ID_TO_PF_CID(pdev, vf_info,LM_SW_LEADING_RSS_CID(pdev)),
712 vf_info->abs_vf_id,
713 8 + vf_info->abs_vf_id,
714 LM_VF_SLOWPATH(vf_info, rss_rdata),
715 LM_VF_SLOWPATH_PHYS(vf_info, rss_rdata),
716 ECORE_FILTER_RSS_CONF_PENDING,
717 (unsigned long *)&vf_info->vf_slowpath_info.sp_rss_state,
718 ECORE_OBJ_TYPE_RX);
719 vf_info->was_malicious = FALSE;
720 return;
721 }
722
723 lm_status_t lm_pf_vf_wait_for_stats_ready(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
724 {
725 return lm_wait_state_change(pdev, &vf_info->vf_stats.vf_stats_state, VF_STATS_REQ_READY);
726 }
727
728 lm_status_t lm_pf_init_vf_client_init_data(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t q_id,
729 struct sw_vf_pf_rxq_params * rxq_params,
730 struct sw_vf_pf_txq_params * txq_params)
731 {
732 lm_status_t lm_status = LM_STATUS_SUCCESS;
733 struct client_init_ramrod_data *
734 client_init_data_virt = NULL;
735 lm_address_t q_addr;
736 u16_t client_interrupt_moderation_level;
737
738 client_init_data_virt = &(pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_id)].client_init_data_virt->init_data);
739
740 if CHK_NULL(client_init_data_virt)
741 {
742 return LM_STATUS_FAILURE;
743 }
744
745 /* General Structure */
746
747 client_init_data_virt->general.activate_flg = 1;
748 client_init_data_virt->general.client_id = LM_FW_VF_CLI_ID(vf_info, q_id);
749 client_init_data_virt->general.is_fcoe_flg = FALSE;
750 client_init_data_virt->general.statistics_counter_id = LM_FW_VF_STATS_CNT_ID(vf_info);
751 client_init_data_virt->general.statistics_en_flg = TRUE;
752 client_init_data_virt->general.sp_client_id = LM_FW_CLI_ID(pdev, LM_SW_LEADING_RSS_CID(pdev));
753 client_init_data_virt->general.mtu = mm_cpu_to_le16((u16_t)rxq_params->mtu);
754 client_init_data_virt->general.func_id = 8 + vf_info->abs_vf_id;
755     client_init_data_virt->general.cos = 0; // the connection CoS, applicable only if STATIC_COS is set
756 client_init_data_virt->general.traffic_type = LLFC_TRAFFIC_TYPE_NW;
757 client_init_data_virt->general.fp_hsi_ver = vf_info->fp_hsi_ver;
758
759 client_init_data_virt->rx.status_block_id = LM_FW_VF_SB_ID(vf_info,q_id); //LM_FW_VF_SB_ID(vf_info, LM_VF_Q_TO_SB_ID(vf_info,q_id));
760 client_init_data_virt->rx.client_qzone_id = LM_FW_VF_QZONE_ID(vf_info, q_id);
761 // client_init_data_virt->rx.tpa_en_flg = FALSE;
762 client_init_data_virt->rx.max_agg_size = mm_cpu_to_le16(0); /* TPA related only */;
763 client_init_data_virt->rx.extra_data_over_sgl_en_flg = FALSE;
764 if (rxq_params->flags & SW_VFPF_QUEUE_FLG_CACHE_ALIGN) {
765 client_init_data_virt->rx.cache_line_alignment_log_size = rxq_params->cache_line_log;
766 } else {
767 client_init_data_virt->rx.cache_line_alignment_log_size = (u8_t)LOG2(CACHE_LINE_SIZE/* TODO mm_get_cache_line_alignment()*/);
768 }
769
770 if (pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)
771 {
772 client_interrupt_moderation_level = vf_info->current_interrupr_moderation;
773 if ((rxq_params->flags & SW_VFPF_QUEUE_FLG_DHC)) {
774 client_init_data_virt->rx.enable_dynamic_hc = TRUE;
775 } else {
776 client_init_data_virt->rx.enable_dynamic_hc = FALSE;
777 if (client_interrupt_moderation_level == VPORT_INT_MOD_ADAPTIVE)
778 {
779 client_interrupt_moderation_level = VPORT_INT_MOD_UNDEFINED;
780 }
781 }
782 }
783 else
784 {
785 client_init_data_virt->rx.enable_dynamic_hc = FALSE;
786 client_interrupt_moderation_level = VPORT_INT_MOD_OFF;
787 }
788 lm_pf_update_vf_ndsb(pdev, vf_info, q_id, client_interrupt_moderation_level);
789
790 client_init_data_virt->rx.outer_vlan_removal_enable_flg = IS_MULTI_VNIC(pdev)? TRUE: FALSE;
791 client_init_data_virt->rx.inner_vlan_removal_enable_flg = TRUE; //= !pdev->params.keep_vlan_tag;
792
793     /* If a VF L2 client is established without the "accept_any_vlan" flag, the firmware tries to match
794        packets on both MAC and VLAN, fails, and sends the packet to the network (traffic leakage); the
795        "accept_any_vlan" flag is only set later by the "set rx mode" command, and then TX-switching works again. */
796     client_init_data_virt->rx.state = CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN;
797     client_init_data_virt->tx.state = CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN;
798
799 q_addr.as_u64 = rxq_params->rxq_addr;
800 client_init_data_virt->rx.bd_page_base.lo= mm_cpu_to_le32(q_addr.as_u32.low);
801 client_init_data_virt->rx.bd_page_base.hi= mm_cpu_to_le32(q_addr.as_u32.high);
802
803 q_addr.as_u64 = rxq_params->rcq_addr;
804 client_init_data_virt->rx.cqe_page_base.lo = mm_cpu_to_le32(q_addr.as_u32.low);
805 client_init_data_virt->rx.cqe_page_base.hi = mm_cpu_to_le32(q_addr.as_u32.high);
806
807
808 if (!q_id) {
809 client_init_data_virt->rx.is_leading_rss = TRUE;
810 }
811 client_init_data_virt->rx.is_approx_mcast = TRUE;
812
813 client_init_data_virt->rx.approx_mcast_engine_id = 8 + vf_info->abs_vf_id;
814 client_init_data_virt->rx.rss_engine_id = 8 + vf_info->abs_vf_id;
815
816 client_init_data_virt->rx.max_bytes_on_bd = mm_cpu_to_le16((rxq_params->buf_sz) - (pdev)->params.rcv_buffer_offset);
817
818
819 /* Status block index init we do for Rx + Tx together so that we ask which cid we are only once */
820 client_init_data_virt->rx.rx_sb_index_number = rxq_params->sb_index;
821 client_init_data_virt->tx.tx_sb_index_number = txq_params->sb_index;
822
823 /* TX Data (remaining , sb index above...) */
824 /* ooo cid doesn't have a tx chain... */
825 q_addr.as_u64 = txq_params->txq_addr;
826 client_init_data_virt->tx.tx_bd_page_base.hi = mm_cpu_to_le32(q_addr.as_u32.high);
827 client_init_data_virt->tx.tx_bd_page_base.lo = mm_cpu_to_le32(q_addr.as_u32.low);
828
829 client_init_data_virt->tx.tx_status_block_id = LM_FW_VF_SB_ID(vf_info,txq_params->vf_sb);
830
831 client_init_data_virt->tx.enforce_security_flg = TRUE;//FALSE; /* TBD: turn on for KVM VF? */
832
833 /* Tx Switching... */
834 client_init_data_virt->tx.tss_leading_client_id = LM_FW_VF_CLI_ID(vf_info, 0);
835 #ifdef __LINUX
836 client_init_data_virt->tx.tx_switching_flg = FALSE;
837 client_init_data_virt->tx.anti_spoofing_flg = FALSE;
838 #else
839 client_init_data_virt->tx.tx_switching_flg = TRUE;
840 client_init_data_virt->tx.anti_spoofing_flg = TRUE;
841 #endif
842 /* FC */
843 #if 0
844 if (pdev->params.l2_fw_flow_ctrl)
845 {
846 u16_t low_thresh = mm_cpu_to_le16(min(250, ((u16_t)(LM_RXQ(pdev, cid).common.desc_cnt))/4));
847 u16_t high_thresh = mm_cpu_to_le16(min(350, ((u16_t)(LM_RXQ(pdev, cid).common.desc_cnt))/2));
848
849 client_init_data_virt->fc.cqe_pause_thr_low = low_thresh;
850 client_init_data_virt->fc.bd_pause_thr_low = low_thresh;
851 client_init_data_virt->fc.sge_pause_thr_low = 0;
852 client_init_data_virt->fc.rx_cos_mask = 1;
853 client_init_data_virt->fc.cqe_pause_thr_high = high_thresh;
854 client_init_data_virt->fc.bd_pause_thr_high = high_thresh;
855 client_init_data_virt->fc.sge_pause_thr_high = 0;
856 }
857 #endif
858
859 client_init_data_virt->tx.refuse_outband_vlan_flg = 0;
860
861     // For encapsulated packets the HW IP id is the inner IP id, i.e. the HW increments
862     // the inner IP id; this means that if the outer IP header is IPv4, its IP id will
863     // not be incremented.
864     client_init_data_virt->tx.tunnel_lso_inc_ip_id = INT_HEADER;
865     // For non-LSO encapsulated packets with L4 checksum offload, the pseudo checksum location is on the BD.
866     client_init_data_virt->tx.tunnel_non_lso_pcsum_location = CSUM_ON_BD;
867     // For non-LSO encapsulated packets with outer L3 IP checksum offload, the pseudo checksum location is on the BD.
868     client_init_data_virt->tx.tunnel_non_lso_outer_ip_csum_location = CSUM_ON_BD;
869
870 return lm_status;
871 }
872
873 u8_t lm_pf_is_sriov_valid(struct _lm_device_t *pdev)
874 {
875 u8_t res = FALSE;
876 if (IS_PFDEV(pdev)) {
877 if (pdev->hw_info.sriov_info.total_vfs) {
878 DbgMessage(pdev, FATAL, "The card has valid SRIOV caps\n");
879 res = TRUE;
880 } else {
881 DbgMessage(pdev, FATAL, "The card has not valid SRIOV caps\n");
882 res = FALSE;
883 }
884 } else {
885 DbgMessage(pdev, FATAL, "Request of validity SRIOV caps is not applicable for VF\n");
886 res = FALSE;
887 }
888 return res;
889 }
890
891 u8_t lm_pf_allocate_vf_igu_sbs(lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t num_of_igu_sbs)
892 {
893 u8_t num_of_vf_desired_vf_chains;
894 u8_t idx;
895 u8_t starting_from = 0;
896 if ((pdev == NULL) || (vf_info == NULL))
897 {
898 DbgBreak();
899 return 0;
900 }
901 vf_info->num_igu_sb_available = lm_pf_get_vf_available_igu_blocks(pdev);
902 if (vf_info->num_igu_sb_available == 0)
903 {
904 return 0;
905 }
906
907 num_of_vf_desired_vf_chains = min(vf_info->num_igu_sb_available, LM_VF_CHAINS_PER_PF(pdev));
908 num_of_vf_desired_vf_chains = min(num_of_vf_desired_vf_chains, num_of_igu_sbs);
909 MM_ACQUIRE_PF_LOCK(pdev);
910 for (idx = 0; idx < num_of_vf_desired_vf_chains; idx++)
911 {
912 starting_from = vf_info->vf_chains[idx].igu_sb_id = lm_pf_get_next_free_igu_block_id(pdev, starting_from);
913 if (starting_from == 0xFF)
914 {
915 break;
916 }
917 lm_pf_acquire_vf_igu_block(pdev, starting_from, vf_info->abs_vf_id, idx);
918 }
919 MM_RELEASE_PF_LOCK(pdev);
920 num_of_vf_desired_vf_chains = idx;
921 #if 0
922 vf_info->num_igu_sb_available = pdev->hw_info.intr_blk_info.igu_info.vf_igu_info[vf_info->abs_vf_id].igu_sb_cnt;
923 num_of_vf_desired_vf_chains = min(vf_info->num_igu_sb_available, num_of_igu_sbs);
924 for (idx = 0; idx < num_of_vf_desired_vf_chains; idx++)
925 {
926 vf_info->vf_chains[idx].igu_sb_id = pdev->hw_info.intr_blk_info.igu_info.vf_igu_info[vf_info->abs_vf_id].igu_base_sb + idx;
927 }
928 #endif
929 return num_of_vf_desired_vf_chains;
930 }
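
/*
 * VF status blocks are backed by free IGU blocks: for every desired chain the next
 * unused, non-PF IGU block is looked up and programmed (under the PF lock) with the
 * VF's absolute id and the chain index as the MSI-X vector number.  The function
 * returns how many blocks were actually obtained, which may be fewer than requested.
 */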
931
932 void lm_pf_release_vf_igu_sbs(struct _lm_device_t *pdev, struct _lm_vf_info_t *vf_info)
933 {
934 return;
935 }
936
937 u8_t lm_pf_get_max_number_of_vf_igu_sbs(lm_device_t *pdev)
938 {
939 u8_t max_igu_sbs = pdev->hw_info.sriov_info.total_vfs
940 * pdev->hw_info.intr_blk_info.igu_info.vf_igu_info[0].igu_sb_cnt;
941 return max_igu_sbs;
942 }
943
944 u8_t lm_pf_get_next_free_igu_block_id(lm_device_t *pdev, u8_t starting_from)
945 {
946 u8_t igu_sb_idx;
947 u8_t igu_free_sb_id = 0xFF;
948 for (igu_sb_idx = starting_from; igu_sb_idx < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_idx++ )
949 {
950 lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev,igu_sb_idx);
951 if (lm_igu_sb->status & LM_IGU_STATUS_AVAILABLE)
952 {
953 if (!(lm_igu_sb->status & LM_IGU_STATUS_PF) && !(lm_igu_sb->status & LM_IGU_STATUS_BUSY))
954 {
955 igu_free_sb_id = igu_sb_idx;
956 break;
957 }
958 }
959 }
960 return igu_free_sb_id;
961 }
962
963 void lm_pf_clear_vf_igu_blocks(lm_device_t *pdev)
964 {
965 u8_t igu_sb_idx;
966 for (igu_sb_idx = 0; igu_sb_idx < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_idx++ )
967 {
968 lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev,igu_sb_idx);
969 if (lm_igu_sb->status & LM_IGU_STATUS_AVAILABLE)
970 {
971 if (!(lm_igu_sb->status & LM_IGU_STATUS_PF))
972 {
973 REG_WR(PFDEV(pdev), IGU_REG_MAPPING_MEMORY + 4*igu_sb_idx, 0);
974 lm_igu_sb->vf_number = lm_igu_sb->vector_number = 0xFF;
975 lm_igu_sb->status &= ~LM_IGU_STATUS_BUSY;
976 }
977 }
978 }
979 return;
980 }
981
982 u8_t lm_pf_release_vf_igu_block(lm_device_t *pdev, u8_t igu_sb_idx)
983 {
984 lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev,igu_sb_idx);
985 u8_t res = FALSE;
986
987 if (!(lm_igu_sb->status & LM_IGU_STATUS_PF) && (lm_igu_sb->status & LM_IGU_STATUS_AVAILABLE) && (igu_sb_idx < IGU_REG_MAPPING_MEMORY_SIZE))
988 {
989 REG_WR(PFDEV(pdev), IGU_REG_MAPPING_MEMORY + 4*igu_sb_idx, 0);
990 lm_igu_sb->vf_number = lm_igu_sb->vector_number = 0xFF;
991 lm_igu_sb->status &= ~LM_IGU_STATUS_BUSY;
992 res = TRUE;
993 }
994 else
995 {
996 DbgBreak();
997 }
998 return res;
999 }
1000
1001 u8_t lm_pf_acquire_vf_igu_block(lm_device_t *pdev, u8_t igu_sb_idx, u8_t abs_vf_id, u8_t vector_number)
1002 {
1003 lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev,igu_sb_idx);
1004 u8_t res = FALSE;
1005 u32_t value = 0;
1006
1007 if (!(lm_igu_sb->status & LM_IGU_STATUS_PF) && (lm_igu_sb->status & LM_IGU_STATUS_AVAILABLE)
1008 && !(lm_igu_sb->status & LM_IGU_STATUS_BUSY) && (igu_sb_idx < IGU_REG_MAPPING_MEMORY_SIZE))
1009 {
1010 value = (IGU_REG_MAPPING_MEMORY_FID_MASK & (abs_vf_id << IGU_REG_MAPPING_MEMORY_FID_SHIFT))
1011 | (IGU_REG_MAPPING_MEMORY_VECTOR_MASK & (vector_number << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT))
1012 | IGU_REG_MAPPING_MEMORY_VALID;
1013 REG_WR(PFDEV(pdev), IGU_REG_MAPPING_MEMORY + 4*igu_sb_idx, value);
1014 lm_igu_sb->vf_number = abs_vf_id;
1015 lm_igu_sb->vector_number = vector_number;
1016 lm_igu_sb->status |= LM_IGU_STATUS_BUSY;
1017 res = TRUE;
1018 }
1019 else
1020 {
1021 DbgBreak();
1022 }
1023 return res;
1024 }
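
/*
 * An IGU_REG_MAPPING_MEMORY entry packs the owning function id (FID field), the MSI-X
 * vector number and a valid bit; lm_pf_acquire_vf_igu_block() writes such an entry for
 * the VF, while lm_pf_release_vf_igu_block()/lm_pf_clear_vf_igu_blocks() write 0 to
 * drop the valid bit and return the block to the free pool.
 */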
1025
1026 u8_t lm_pf_get_vf_available_igu_blocks(lm_device_t *pdev)
1027 {
1028 u8_t igu_sb_idx;
1029 u8_t available_igu_sbs = 0;
1030 for (igu_sb_idx = 0; igu_sb_idx < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_idx++ )
1031 {
1032 lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev,igu_sb_idx);
1033 if (lm_igu_sb->status & LM_IGU_STATUS_AVAILABLE)
1034 {
1035 if (!(lm_igu_sb->status & LM_IGU_STATUS_PF) && !(lm_igu_sb->status & LM_IGU_STATUS_BUSY))
1036 {
1037 available_igu_sbs++;
1038 }
1039 }
1040 }
1041 return available_igu_sbs;
1042 }
1043
1044 lm_status_t lm_pf_update_vf_default_vlan(IN struct _lm_device_t *pdev, IN struct _lm_vf_info_t * vf_info,
1045 IN const u16_t silent_vlan_value,
1046 IN const u16_t silent_vlan_mask,
1047 IN const u8_t silent_vlan_removal_flg,
1048 IN const u8_t silent_vlan_change_flg,
1049 IN const u16_t default_vlan,
1050 IN const u8_t default_vlan_enable_flg,
1051 IN const u8_t default_vlan_change_flg)
1052 {
1053 struct client_update_ramrod_data * client_update_data_virt = NULL;
1054 lm_status_t lm_status = LM_STATUS_FAILURE;
1055 u32_t vf_cid_of_pf = 0;
1056 u8_t type = 0;
1057 u8_t q_idx = 0;
1058
1059
1060 for (q_idx = 0; q_idx < vf_info->vf_si_num_of_active_q; q_idx++) {
1061 client_update_data_virt = pdev->client_info[LM_SW_VF_CLI_ID(vf_info, q_idx)].update.data_virt;
1062 if CHK_NULL(client_update_data_virt)
1063 {
1064 DbgBreak();
1065 return LM_STATUS_FAILURE;
1066 }
1067 mm_mem_zero((void *) client_update_data_virt , sizeof(struct client_update_ramrod_data));
1068
1069 MM_ACQUIRE_ETH_CON_LOCK(pdev);
1070
1071 DbgBreakIf( LM_CLI_UPDATE_NOT_USED != pdev->client_info[LM_SW_VF_CLI_ID(vf_info, q_idx)].update.state);
1072
1073 pdev->client_info[LM_SW_VF_CLI_ID(vf_info, q_idx)].update.state = LM_CLI_UPDATE_USED;
1074
1075 client_update_data_virt->client_id = LM_FW_VF_CLI_ID(vf_info, q_idx);
1076 client_update_data_virt->func_id = 8 + vf_info->abs_vf_id;
1077
1078 client_update_data_virt->silent_vlan_value = mm_cpu_to_le16(silent_vlan_value);
1079 client_update_data_virt->silent_vlan_mask = mm_cpu_to_le16(silent_vlan_mask);
1080 client_update_data_virt->silent_vlan_removal_flg = silent_vlan_removal_flg;
1081 client_update_data_virt->silent_vlan_change_flg = silent_vlan_change_flg;
1082
1083 client_update_data_virt->default_vlan = mm_cpu_to_le16(default_vlan);
1084 client_update_data_virt->default_vlan_enable_flg = default_vlan_enable_flg;
1085 client_update_data_virt->default_vlan_change_flg = default_vlan_change_flg;
1086
1087 client_update_data_virt->refuse_outband_vlan_flg = 0;
1088 client_update_data_virt->refuse_outband_vlan_change_flg = 0;
1089
1090 vf_cid_of_pf = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_idx);
1091 type = (ETH_CONNECTION_TYPE | ((8 + vf_info->abs_vf_id) << SPE_HDR_T_FUNCTION_ID_SHIFT));
1092
1093 lm_status = lm_sq_post(pdev,
1094 vf_cid_of_pf,
1095 RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
1096 CMD_PRIORITY_MEDIUM,
1097 type,
1098 pdev->client_info[LM_SW_VF_CLI_ID(vf_info, q_idx)].update.data_phys.as_u64);
1099
1100
1101 MM_RELEASE_ETH_CON_LOCK(pdev);
1102 if (lm_status != LM_STATUS_SUCCESS)
1103 {
1104 return lm_status;
1105 }
1106
1107 lm_status = lm_wait_state_change(pdev, &pdev->client_info[LM_SW_VF_CLI_ID(vf_info, q_idx)].update.state, LM_CLI_UPDATE_RECV);
1108
1109 pdev->client_info[LM_SW_VF_CLI_ID(vf_info, q_idx)].update.state = LM_CLI_UPDATE_NOT_USED;
1110 }
1111
1112 return lm_status;
1113 }
1114
1115 lm_status_t lm_pf_update_vf_ndsb(IN struct _lm_device_t *pdev,
1116 IN struct _lm_vf_info_t *vf_info,
1117 IN u8_t relative_in_vf_ndsb,
1118 IN u16_t interrupt_mod_level)
1119 {
1120 lm_status_t lm_status = LM_STATUS_SUCCESS;
1121 u8_t dhc_timeout, hc_rx_timeout, hc_tx_timeout;
1122 lm_int_coalesing_info*
1123 ic = &pdev->vars.int_coal;
1124 u32_t rx_coal_usec,tx_coal_usec;
1125
1126
1127 switch (interrupt_mod_level)
1128 {
1129 case VPORT_INT_MOD_UNDEFINED:
1130 dhc_timeout = 0;
1131 hc_rx_timeout = (u8_t)(ic->hc_usec_u_sb[HC_INDEX_VF_ETH_RX_CQ_CONS] / HC_TIMEOUT_RESOLUTION_IN_US);
1132 DbgBreakIf(HC_INDEX_VF_ETH_TX_CQ_CONS < HC_USTORM_SB_NUM_INDICES);
1133 hc_tx_timeout = (u8_t)(ic->hc_usec_c_sb[HC_INDEX_VF_ETH_TX_CQ_CONS - HC_USTORM_SB_NUM_INDICES] / HC_TIMEOUT_RESOLUTION_IN_US);
1134 break;
1135 case VPORT_INT_MOD_ADAPTIVE:
1136 dhc_timeout = (u8_t)pdev->params.hc_timeout0[SM_RX_ID][HC_INDEX_VF_ETH_RX_CQ_CONS];
1137 hc_rx_timeout = (u8_t)(ic->hc_usec_u_sb[HC_INDEX_VF_ETH_RX_CQ_CONS] / HC_TIMEOUT_RESOLUTION_IN_US);
1138 hc_tx_timeout = (u8_t)(ic->hc_usec_c_sb[HC_INDEX_VF_ETH_TX_CQ_CONS - HC_USTORM_SB_NUM_INDICES] / HC_TIMEOUT_RESOLUTION_IN_US);
1139 break;
1140 case VPORT_INT_MOD_OFF:
1141 dhc_timeout = 0;
1142 hc_rx_timeout = 0;
1143 hc_tx_timeout = 0;
1144 break;
1145 case VPORT_INT_MOD_LOW:
1146 dhc_timeout = 0;
1147 rx_coal_usec = 1000000 / pdev->params.vf_int_per_sec_rx[LM_VF_INT_LOW_IDX];
1148 tx_coal_usec = 1000000 / pdev->params.vf_int_per_sec_tx[LM_VF_INT_LOW_IDX];
1149 hc_rx_timeout = (u8_t)(rx_coal_usec / HC_TIMEOUT_RESOLUTION_IN_US);
1150         hc_tx_timeout = (u8_t)(tx_coal_usec / HC_TIMEOUT_RESOLUTION_IN_US);
1151 break;
1152 case VPORT_INT_MOD_MEDIUM:
1153 dhc_timeout = 0;
1154 rx_coal_usec = 1000000 / pdev->params.vf_int_per_sec_rx[LM_VF_INT_MEDIUM_IDX];
1155 tx_coal_usec = 1000000 / pdev->params.vf_int_per_sec_tx[LM_VF_INT_MEDIUM_IDX];
1156 hc_rx_timeout = (u8_t)(rx_coal_usec / HC_TIMEOUT_RESOLUTION_IN_US);
1157         hc_tx_timeout = (u8_t)(tx_coal_usec / HC_TIMEOUT_RESOLUTION_IN_US);
1158 break;
1159 case VPORT_INT_MOD_HIGH:
1160 dhc_timeout = 0;
1161 rx_coal_usec = 1000000 / pdev->params.vf_int_per_sec_rx[LM_VF_INT_HIGH_IDX];
1162 tx_coal_usec = 1000000 / pdev->params.vf_int_per_sec_tx[LM_VF_INT_HIGH_IDX];
1163 hc_rx_timeout = (u8_t)(rx_coal_usec / HC_TIMEOUT_RESOLUTION_IN_US);
1164         hc_tx_timeout = (u8_t)(tx_coal_usec / HC_TIMEOUT_RESOLUTION_IN_US);
1165 break;
1166 default:
1167 lm_status = LM_STATUS_INVALID_PARAMETER;
1168 DbgBreak();
1169 break;
1170 }
1171 if (lm_status == LM_STATUS_SUCCESS)
1172 {
1173 u8_t dhc_enable;
1174 u8_t timeout;
1175 u32_t index;
1176
1177 if (dhc_timeout)
1178 {
1179 dhc_enable = TRUE;
1180 timeout = dhc_timeout;
1181 REG_WR(PFDEV(pdev), CSEM_REG_FAST_MEMORY + CSTORM_BYTE_COUNTER_OFFSET(LM_FW_VF_DHC_QZONE_ID(vf_info, relative_in_vf_ndsb), HC_INDEX_VF_ETH_RX_CQ_CONS), 0);
1182 }
1183 else
1184 {
1185 dhc_enable = FALSE;
1186 timeout = hc_rx_timeout;
1187 }
1188 lm_setup_ndsb_index(pdev, LM_SW_VF_SB_ID(vf_info,relative_in_vf_ndsb), HC_INDEX_VF_ETH_RX_CQ_CONS, SM_RX_ID, timeout, dhc_enable);
1189 lm_setup_ndsb_index(pdev, LM_SW_VF_SB_ID(vf_info,relative_in_vf_ndsb), HC_INDEX_VF_ETH_TX_CQ_CONS, SM_TX_ID, hc_tx_timeout, FALSE);
1190 for (index = 0; index < sizeof(struct hc_status_block_data_e2)/sizeof(u32_t); index++) {
1191 LM_INTMEM_WRITE32(pdev, CSTORM_STATUS_BLOCK_DATA_OFFSET(LM_FW_VF_SB_ID(vf_info, relative_in_vf_ndsb)) + sizeof(u32_t)*index,
1192 *((u32_t*)(&pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,relative_in_vf_ndsb)].hc_status_block_data.e2_sb_data) + index), BAR_CSTRORM_INTMEM);
1193 }
1194 }
1195 return lm_status;
1196 }
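
/*
 * Interrupt moderation levels map to host-coalescing timeouts as follows: ADAPTIVE
 * arms dynamic HC with the configured timeout, UNDEFINED falls back to the static
 * u/c-storm timeouts, OFF disables coalescing, and LOW/MEDIUM/HIGH derive the timeout
 * from the requested interrupts-per-second rate (1000000 / rate, in usec, scaled by
 * HC_TIMEOUT_RESOLUTION_IN_US) before the per-NDSB data is rewritten in internal RAM.
 */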
1197
1198 lm_status_t lm_pf_update_vf_ndsbs(IN struct _lm_device_t *pdev,
1199 IN struct _lm_vf_info_t *vf_info,
1200 IN u16_t interrupt_mod_level)
1201 {
1202 lm_status_t lm_status = LM_STATUS_SUCCESS;
1203 u8_t q_idx = 0;
1204 u8_t is_hc_available_on_host;
1205 u16_t client_interrupt_mod_level;
1206 if (pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)
1207 {
1208 is_hc_available_on_host = TRUE;
1209 }
1210 else
1211 {
1212 is_hc_available_on_host = FALSE;
1213 }
1214
1215 switch (interrupt_mod_level)
1216 {
1217 case VPORT_INT_MOD_OFF:
1218 break;
1219 case VPORT_INT_MOD_UNDEFINED:
1220 if (is_hc_available_on_host)
1221 {
1222 interrupt_mod_level = VPORT_INT_MOD_ADAPTIVE;
1223 }
1224 case VPORT_INT_MOD_ADAPTIVE:
1225 case VPORT_INT_MOD_LOW:
1226 case VPORT_INT_MOD_MEDIUM:
1227 case VPORT_INT_MOD_HIGH:
1228 if (!is_hc_available_on_host)
1229 {
1230 interrupt_mod_level = VPORT_INT_MOD_OFF;
1231 }
1232 break;
1233 default:
1234 lm_status = LM_STATUS_INVALID_PARAMETER;
1235 DbgBreak();
1236 break;
1237 }
1238
1239 if (lm_status != LM_STATUS_SUCCESS)
1240 {
1241 return lm_status;
1242 }
1243
1244 vf_info->current_interrupr_moderation = interrupt_mod_level;
1245 for (q_idx = 0; q_idx < vf_info->vf_si_num_of_active_q; q_idx++)
1246 {
1247 client_interrupt_mod_level = interrupt_mod_level;
1248 if ((interrupt_mod_level == VPORT_INT_MOD_ADAPTIVE) && !pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_idx)].client_init_data_virt->init_data.rx.enable_dynamic_hc)
1249 {
1250 client_interrupt_mod_level = VPORT_INT_MOD_UNDEFINED;
1251 }
1252 lm_pf_update_vf_ndsb(pdev, vf_info, q_idx, client_interrupt_mod_level);
1253 }
1254
1255 return lm_status;
1256 }
1257 #endif //VF_INVOLVED
1258