#ifdef VF_INVOLVED

#include "lm5710.h"
#include "command.h"
#include "igu_def.h"

u8_t lm_vf_is_function_after_flr(struct _lm_device_t * pdev)
{
    u8_t res = 0;
    res = (PFDEV(pdev)->vars.connections[VF_TO_PF_CID(pdev,LM_SW_LEADING_RSS_CID(pdev))].con_state != LM_CON_STATE_CLOSE);
    if (res) {
        DbgMessage2(pdev, FATAL, "VF[%d(%d)] was FLRed\n", ABS_VFID(pdev), REL_VFID(pdev));
    }
    return res;
}

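/*
 * The lm_vf_get_free_* helpers below treat the pf_resources.free_* arrays
 * as allocation bitmaps shared by all VFs of this PF, scanned inside a
 * [base, max) window; 0xff means no free run of the requested size exists.
 * A caller is expected to pair a lookup with an explicit acquire under the
 * PF lock, as lm_vf_allocate_resc_in_pf does further down. A minimal
 * sketch of that pattern:
 *
 *     MM_ACQUIRE_PF_LOCK(pf_dev);
 *     base = lm_vf_get_free_sbs(pf_dev, num_rss);
 *     if (base != 0xff) {
 *         lm_vf_acquire_resource(pf_dev->pf_resources.free_sbs, base, num_rss);
 *     }
 *     MM_RELEASE_PF_LOCK(pf_dev);
 */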
static u8_t lm_vf_get_free_sbs(struct _lm_device_t * pf_dev, u8_t num_rss)
{
    u8_t free_sb = 0xff;
    u8_t max_num = pf_dev->params.base_fw_ndsb + MAX_RSS_CHAINS / pf_dev->params.vnics_per_port;
    free_sb = lm_vf_get_free_resource(pf_dev->pf_resources.free_sbs, pf_dev->params.base_fw_ndsb, max_num, num_rss);
    if (free_sb != 0xff) {
        DbgMessage3(pf_dev,FATAL,"lm_vf_get_free_sbs(%d-%d): %d\n",pf_dev->params.base_fw_ndsb, max_num, free_sb);
    } else {
        DbgMessage2(pf_dev,FATAL,"lm_vf_get_free_sbs(%d-%d): No more free SBs\n",pf_dev->params.base_fw_ndsb, max_num);
    }
    return free_sb;
}

static u8_t lm_vf_get_free_clients(struct _lm_device_t * pf_dev, u8_t num_rss)
{
    u8_t free_cli = 0xff;
    u8_t max_num = pf_dev->params.base_fw_client_id + MAX_RSS_CHAINS / pf_dev->params.vnics_per_port;
    free_cli = lm_vf_get_free_resource(pf_dev->pf_resources.free_clients, pf_dev->params.base_fw_client_id, max_num, num_rss);
    if (free_cli != 0xff) {
        DbgMessage3(pf_dev,FATAL,"lm_vf_get_free_clients(%d-%d): %d\n",pf_dev->params.base_fw_client_id, max_num, free_cli);
    } else {
        DbgMessage2(pf_dev,FATAL,"lm_vf_get_free_clients(%d-%d): No more free clients\n",pf_dev->params.base_fw_client_id, max_num);
    }
    return free_cli;
}

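/*
 * Stats counter IDs are handled differently from SBs and clients: the first
 * vnics_per_port counters presumably belong to the PFs themselves, and the
 * remaining (MAX_NUM_OF_STATS - vnics_per_port) counters are split evenly
 * between VNICs, so each VNIC searches only its own [min_num, max_num)
 * window computed below.
 */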
static u8_t lm_vf_get_free_stats(struct _lm_device_t * pf_dev)
{
    u8_t free_st_id = 0xff;
    u8_t min_num = pf_dev->params.vnics_per_port + VNIC_ID(pf_dev) * ((MAX_NUM_OF_STATS - pf_dev->params.vnics_per_port) / pf_dev->params.vnics_per_port);
    u8_t max_num = min_num + (MAX_NUM_OF_STATS - pf_dev->params.vnics_per_port) / pf_dev->params.vnics_per_port;
    free_st_id = lm_vf_get_free_resource(pf_dev->pf_resources.free_stats, min_num, max_num, 1);
    if (free_st_id != 0xff) {
        DbgMessage1(pf_dev,FATAL,"lm_vf_get_free_stats: %d\n",free_st_id);
    } else {
        DbgMessage2(pf_dev,FATAL,"lm_vf_get_free_stats: No more free stats counters(%d,%d)\n",min_num,max_num);
        DbgMessage1(pf_dev,FATAL,"lm_vf_get_free_stats: vnics_per_port is %d\n",pf_dev->params.vnics_per_port);
    }
    return free_st_id;
}

static u8_t lm_vf_get_free_cam_offset(struct _lm_device_t * pf_dev)
{
    u8_t free_cam_offset = 0xff;
    u8_t max_num;
    max_num = LM_CAM_SIZE(pf_dev);
    free_cam_offset = lm_vf_get_free_resource(pf_dev->pf_resources.free_cam_offsets, 0, max_num, 1);
    if (free_cam_offset != 0xff) {
        DbgMessage1(pf_dev,FATAL,"lm_vf_get_free_cam_offset: %d\n",free_cam_offset);
    } else {
        DbgMessage(pf_dev,FATAL,"lm_vf_get_free_cam_offset: No more free cam offsets\n");
    }
    return free_cam_offset;
}

lm_status_t lm_vf_prep(struct _lm_device_t * pf_dev, struct _lm_device_t * vf_dev)
{
    vf_dev->pf_dev = pf_dev;
    /* TODO: anything else to prepare for VF? */

    lm_set_virt_mode(vf_dev, DEVICE_TYPE_VF, VT_BASIC_VF);

    return LM_STATUS_SUCCESS;
}

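/*
 * Per the SR-IOV model, VF BARs are carved out of the PF's VF BAR apertures:
 * BAR n of a given VF lives at the PF's vf_bars[n] base plus
 * REL_VFID * bar_size[n], which is exactly what the next routine computes.
 */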
lm_status_t lm_vf_get_bar_offset(struct _lm_device_t *pdev, u8_t bar_num, lm_address_t * bar_addr)
{
    bar_addr->as_u64 = PFDEV(pdev)->hw_info.sriov_info.vf_bars[bar_num].as_u64 +
                       REL_VFID(pdev)*pdev->hw_info.bar_size[bar_num];
    DbgMessage3(pdev, FATAL, "VF[%d(%d)]-bar[%d]:\n", ABS_VFID(pdev),REL_VFID(pdev),bar_num);
    DbgMessage2(pdev, FATAL, "A: 0x%x, S: 0x%x\n", bar_addr->as_u32.low, pdev->hw_info.bar_size[bar_num]);

    return LM_STATUS_SUCCESS;
}

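/*
 * A VF learns its own identity from PCICFG_ME_REGISTER: a VF-valid bit plus
 * the VF number within the path. The VF number relative to the parent PF is
 * then obtained by subtracting first_vf_in_pf.
 */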
lm_status_t lm_vf_get_vf_id(struct _lm_device_t * pdev)
{
    u32_t val;

    mm_read_pci(pdev, PCICFG_ME_REGISTER, &val);

    DbgMessage1(pdev, FATAL, "vf ME-REG value: 0x%x\n", val);

    if (!(val & ME_REG_VF_VALID)) {
        DbgBreakIf(!(val & ME_REG_VF_VALID));
        return LM_STATUS_FAILURE;
    }
    pdev->params.vf_num_in_path = (val & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

    if (pdev->params.vf_num_in_path < PFDEV(pdev)->hw_info.sriov_info.first_vf_in_pf) {
        DbgBreakIf(pdev->params.vf_num_in_path < PFDEV(pdev)->hw_info.sriov_info.first_vf_in_pf);
        return LM_STATUS_FAILURE;
    }
    pdev->params.vf_num_in_pf = pdev->params.vf_num_in_path - PFDEV(pdev)->hw_info.sriov_info.first_vf_in_pf;

    DbgMessage2(pdev, FATAL, "vf_num_in_path=%d vf_num_in_pf=%d\n", pdev->params.vf_num_in_path, pdev->params.vf_num_in_pf);

    return LM_STATUS_SUCCESS;
}

lm_status_t lm_vf_get_intr_blk_info(struct _lm_device_t *pdev)
{
    // TODO
    return LM_STATUS_SUCCESS;
}

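/*
 * lm_vf_en marks the PF's own resources as busy in the shared bitmaps so
 * that later VF allocations cannot hand them out: CAM offsets 0 and 1 (the
 * 0x3 mask), the PF's FW SB and client IDs for every RSS chain, and the
 * PF's own stats counter.
 */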
lm_status_t lm_vf_en(struct _lm_device_t * pf_dev, u16_t vf_num)
{
    u8_t rss_id;
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    /* TODO: what HW needs to be initialized at this stage */
    /* TODO: VF Database for FLR needs? */
#ifndef _VBD_CMD_
    lm_status = mm_vf_en(pf_dev, vf_num);
#endif
    if (lm_status == LM_STATUS_SUCCESS) {
        pf_dev->pf_resources.free_cam_offsets[0] |= 0x3;

        LM_FOREACH_RSS_IDX(pf_dev, rss_id) {
            lm_vf_acquire_resource(pf_dev->pf_resources.free_sbs, LM_FW_SB_ID(pf_dev, RSS_ID_TO_SB_ID(rss_id)), 1);
            DbgMessage2(pf_dev, FATAL, "SB%d is allocated for PF[%d] itself\n", LM_FW_SB_ID(pf_dev, RSS_ID_TO_SB_ID(rss_id)), FUNC_ID(pf_dev));
            lm_vf_acquire_resource(pf_dev->pf_resources.free_clients, LM_FW_CLI_ID(pf_dev, RSS_ID_TO_CID(rss_id)), 1);
            DbgMessage2(pf_dev, FATAL, "Client%d is allocated for PF[%d] itself\n", LM_FW_CLI_ID(pf_dev, RSS_ID_TO_CID(rss_id)), FUNC_ID(pf_dev));
        }

        lm_vf_acquire_resource(pf_dev->pf_resources.free_stats, LM_STATS_CNT_ID(pf_dev), 1);
        DbgMessage2(pf_dev, FATAL, "Stats%d is allocated for PF[%d] itself\n", LM_STATS_CNT_ID(pf_dev), FUNC_ID(pf_dev));
    }
    pf_dev->vars.num_vfs_enabled = vf_num;
    return lm_status;
}

lm_status_t lm_vf_dis(struct _lm_device_t * pf_dev)
{
    /* TODO: Clean VF Database for FLR needs? */
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u32_t base_vfid, vfid;
    u16_t pretend_val;
    u16_t ind_cids, start_cid, end_cid;

    DbgMessage(pf_dev, FATAL, "vf disable\n");
    start_cid = (((1 << LM_VF_MAX_RVFID_SIZE) | 0) << LM_VF_CID_WND_SIZE);  /* CID window of the first possible VF (rel. VFID 0) */
    end_cid = (((1 << LM_VF_MAX_RVFID_SIZE) | 63) << LM_VF_CID_WND_SIZE);   /* CID window of the last possible VF (rel. VFID 63) */
    DbgMessage2(pf_dev, FATAL, "vf disable: clear VFs connections from %d till %d\n",start_cid, end_cid);
    for (ind_cids = start_cid; ind_cids <= end_cid; ind_cids++) {
        pf_dev->vars.connections[ind_cids].con_state = LM_CON_STATE_CLOSE;
    }
#ifndef _VBD_CMD_
    mm_vf_dis(pf_dev);
#endif

    if (lm_is_function_after_flr(pf_dev)) {
        DbgMessage(pf_dev, FATAL, "vf disable called on a flred function - not much we can do here... \n");
        return LM_STATUS_SUCCESS;
    }
    /* If no MCP is present, we must pretend to each VF in the PF ourselves and
     * clear the IGU VF MSI-X enable and the internal VFID enable bit. */
    if (GET_FLAGS( pf_dev->params.test_mode, TEST_MODE_NO_MCP)){
        DbgMessage(pf_dev, FATAL, "bootcode is down, performing SR-IOV disable from the driver.\n");
        base_vfid = pf_dev->hw_info.sriov_info.first_vf_in_pf;
        for (vfid = base_vfid; vfid < base_vfid + pf_dev->vars.num_vfs_enabled; vfid++ ) {
            pretend_val = ABS_FUNC_ID(pf_dev) | (1<<3) | (vfid << 4);
            lm_pretend_func(pf_dev, pretend_val);

            REG_WR(pf_dev, IGU_REG_PCI_VF_MSIX_EN, 0);
            REG_WR(pf_dev, IGU_REG_PCI_VF_MSIX_FUNC_MASK, 0);
            REG_WR(pf_dev, PGLUE_B_REG_INTERNAL_VFID_ENABLE, 0);

            lm_pretend_func(pf_dev, ABS_FUNC_ID(pf_dev) );
        }

        /* This is a clear-on-write register, therefore we actually write 1 to the bit we want to reset */
        REG_WR(pf_dev, 0x24d8, 1<<29);

        REG_WR(pf_dev, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR ,(1<<ABS_FUNC_ID(pf_dev)));
        //REG_WR(pf_dev, PGLUE_B_REG_DISABLE_FLR_SRIOV_DISABLED, PGLUE_B_DISABLE_FLR_SRIOV_DISABLED_REG_DISABLE_SRIOV_DISABLED_REQUEST);
    }
    return lm_status;
}

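/*
 * Several routines in this file reach VF-private registers through the PF
 * by "pretending": after a pretend value is written, subsequent GRC
 * accesses execute on behalf of the selected function. The layout used
 * throughout is [2:0] PFID, [3] VF valid, [9:4] VFID:
 *
 *     pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (ABS_VFID(pdev) << 4);
 *     lm_pretend_func(PFDEV(pdev), pretend_val);
 *     ... VF-scoped REG_RD/REG_WR ...
 *     lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev));  // un-pretend
 */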
lm_status_t lm_alloc_client_info(struct _lm_device_t *pdev);
lm_status_t lm_setup_client_info(struct _lm_device_t *pdev);

/* Description:
 * This routine contains the VF alloc/setup code, distinguished by the b_is_alloc flag
 */
lm_status_t lm_vf_setup_alloc_resc(struct _lm_device_t *pdev, u8_t b_is_alloc )
{
    lm_variables_t* vars = NULL ;
    u32_t mem_size = 0 ;
    u32_t alloc_size = 0 ;
    u8_t mm_cli_idx = 0 ;
    u8_t sb_id = 0 ;
    lm_address_t sb_phy_address;
    lm_status_t lm_status = LM_STATUS_FAILURE;

    if CHK_NULL( pdev )
    {
        return LM_STATUS_INVALID_PARAMETER ;
    }

    DbgMessage1(pdev, FATAL , "### VF lm_vf_setup_alloc_resc b_is_alloc=%s\n", b_is_alloc ? "TRUE" : "FALSE" );

    vars = &(pdev->vars) ;

    // Status blocks allocation. We allocate mem both for the default and non-default status blocks
    // there is 1 def sb and 16 non-def sb per port.
    // non-default sb: index 0-15, default sb: index 16.
    mem_size = E2_STATUS_BLOCK_BUFFER_SIZE;

    mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);

    LM_FOREACH_SB_ID(pdev, sb_id)
    {
        if( b_is_alloc )
        {
            pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
        }
        if CHK_NULL(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb)
        {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE ;
        }
        mm_mem_zero((void *)(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb), mem_size);
    }

    /* SlowPath Info */
    lm_status = lm_alloc_setup_slowpath_resc(pdev, b_is_alloc);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        DbgMessage1(pdev, FATAL, "lm_alloc_setup_slowpath_resc failed lm-status = %d\n", lm_status);
        return lm_status;
    }

    if (b_is_alloc)
    {
        lm_status = lm_alloc_client_info(pdev);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage1(pdev, FATAL, "lm_alloc_client_info failed lm-status = %d\n", lm_status);
            return lm_status;
        }
    }

    lm_status = lm_setup_client_info(pdev);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        DbgMessage1(pdev, FATAL, "lm_setup_client_info failed lm-status = %d\n", lm_status);
        return lm_status;
    }

    return LM_STATUS_SUCCESS;
}

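/*
 * In basic-VF mode the VF drives its chains through doorbells in its own
 * BAR0: lm_vf_init_dev_info below points the PF's context entry for each
 * VF CID at VF_BAR0_DB_OFFSET plus one LM_DQ_CID_SIZE stride per chain.
 */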
lm_status_t lm_vf_init_dev_info(struct _lm_device_t *pdev)
{
    u8_t index;
    lm_status_t lm_status;
    lm_status = lm_vf_allocate_resc_in_pf(pdev);
    if (lm_status == LM_STATUS_SUCCESS) {
        pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled = FALSE;
        pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled = FALSE;
        DbgBreakIf(LM_SB_CNT(pdev) != 1);

        for (index = 0; index < LM_SB_CNT(pdev); index++) { //RSS? but not SBs
            PFDEV(pdev)->context_info->array[VF_TO_PF_CID(pdev,index)].cid_resc.mapped_cid_bar_addr =
                (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_0] + index*LM_DQ_CID_SIZE + VF_BAR0_DB_OFFSET);
        }
    }
    return lm_status;
}


//static vf_info_t tmp_vf_info;

lm_status_t lm_vf_allocate_resc_in_pf(struct _lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    DbgMessage(pdev, FATAL, "lm_vf_allocate_resc_in_pf\n");

    DbgMessage2(pdev,FATAL,"lm_vf_allocate_resc_in_pf: VF %d requests resources from PF %d\n",ABS_VFID(pdev),FUNC_ID(pdev));
    MM_ACQUIRE_PF_LOCK(PFDEV(pdev));

    pdev->params.base_fw_client_id = lm_vf_get_free_clients(PFDEV(pdev),pdev->params.sb_cnt);
    pdev->params.base_fw_ndsb = lm_vf_get_free_sbs(PFDEV(pdev),pdev->params.sb_cnt);
    pdev->params.base_cam_offset = lm_vf_get_free_cam_offset(PFDEV(pdev));
    pdev->params.base_fw_stats_id = lm_vf_get_free_stats(PFDEV(pdev));

    if ((pdev->params.base_fw_client_id == 0xff)
            || (pdev->params.base_fw_ndsb == 0xff)
            || (pdev->params.base_cam_offset == 0xff)) {
        lm_status = LM_STATUS_RESOURCE;
    } else {
        lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_sbs, pdev->params.base_fw_ndsb, pdev->params.sb_cnt);
        lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_clients, pdev->params.base_fw_client_id, pdev->params.sb_cnt);
        lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_cam_offsets, pdev->params.base_cam_offset, 1);
        if (pdev->params.base_fw_stats_id != 0xff) {
            lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_stats, pdev->params.base_fw_stats_id, 1);
        }
        /* For now, qzone_id == sb_id, but this is not a requirement */
        pdev->params.base_fw_qzone_id = pdev->params.base_fw_ndsb;
    }

    MM_RELEASE_PF_LOCK(PFDEV(pdev));

    DbgMessage4(pdev, FATAL, "vf_resc: fw_client=%d fw_ndsb=%d fw cam=%d fw stats=%d\n",
                pdev->params.base_fw_client_id, pdev->params.base_fw_ndsb, pdev->params.base_cam_offset, pdev->params.base_fw_stats_id);

    return lm_status;
}

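/*
 * Everything reserved in lm_vf_allocate_resc_in_pf above is returned by
 * lm_vf_recycle_resc_in_pf below; both run under the PF lock. Note that a
 * missing stats counter (0xff) is not treated as fatal above: only client
 * IDs, NDSBs and a CAM offset are mandatory for bring-up.
 */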
lm_status_t
lm_vf_chip_init(struct _lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u32_t function_fw_id;
    u8_t port = PORT_ID(pdev);
    u8_t i;

    DbgMessage(pdev, FATAL, "lm_vf_chip_init: start\n");
    mm_memset(pdev->vars.c_hc_ack, 0, sizeof(pdev->vars.c_hc_ack));
    mm_memset(pdev->vars.u_hc_ack, 0, sizeof(pdev->vars.u_hc_ack));
    lm_init_non_def_status_block(pdev, LM_SW_LEADING_SB_ID, port);

    // Init SPQ
    /* Driver should zero the slow path queue data before enabling the function in XSTORM.
     * Until now firmware was doing this, but it cannot scale for VFs, so the zeroing was removed from firmware.
     * The driver should write zeros to XSTORM_SPQ_DATA_OFFSET(function).
     * The size of this structure is given in XSTORM_SPQ_DATA_SIZE.
     * For VFs, XSTORM_VF_SPQ_DATA_OFFSET(vfid) should be used. Doing it via GRC is preferable. */
    DbgBreakIf((XSTORM_SPQ_DATA_SIZE % 4) != 0);
    for (i = 0; i < XSTORM_SPQ_DATA_SIZE/sizeof(u32_t); i++) {
        REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + XSTORM_VF_SPQ_DATA_OFFSET(ABS_VFID(pdev)) + i*sizeof(u32_t),0);
    }

    REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(ABS_VFID(pdev))),pdev->sq_info.sq_chain.bd_chain_phy.as_u32.low);
    REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(ABS_VFID(pdev)) + 4),pdev->sq_info.sq_chain.bd_chain_phy.as_u32.high);
    REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PROD_OFFSET(ABS_VFID(pdev))),pdev->sq_info.sq_chain.prod_idx);

    lm_status = lm_set_rx_mask(pdev, LM_CLI_IDX_NDIS, LM_RX_MASK_ACCEPT_NONE, NULL);
    if(LM_STATUS_SUCCESS != lm_status)
    {
        DbgMessage1(pdev,FATAL,"lm_set_rx_mask(LM_RX_MASK_ACCEPT_NONE) returns %d\n",lm_status);
        return lm_status;
    }

    /* Enable the function in STORMs */
    function_fw_id = FW_VFID(pdev);

    LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_USTRORM_INTMEM);

    LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_USTRORM_INTMEM);

    return LM_STATUS_SUCCESS;
}

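/*
 * lm_vf_chip_reset undoes lm_vf_chip_init: it clears the per-STORM
 * function-enable bytes and the leading non-default status block. When an
 * FLR is already in progress, only the PF-side resource accounting is
 * recycled, since the hardware state is being reset anyway.
 */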
lm_status_t
lm_vf_chip_reset(struct _lm_device_t *pdev, lm_reason_t reason)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u32_t function_fw_id;
    u8_t port = PORT_ID(pdev);

    if (lm_reset_is_inprogress(pdev)) {
        DbgMessage1(pdev,FATAL,"lm_vf_chip_reset: VF(%d) under reset\n",ABS_VFID(pdev));
        if (!lm_vf_fl_reset_is_inprogress(pdev)) {
            lm_status = lm_vf_recycle_resc_in_pf(pdev);
            PFDEV(pdev)->vars.connections[VF_TO_PF_CID(pdev,LM_SW_LEADING_RSS_CID(pdev))].con_state = LM_CON_STATE_CLOSE;
            DbgMessage1(pdev,FATAL,"lm_vf_chip_reset: recycle resources (including connection) for VF(%d)\n",ABS_VFID(pdev));
        }
        return lm_status;
    }

    /* Disable the function in STORMs */
    function_fw_id = FW_VFID(pdev);

    LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_USTRORM_INTMEM);

    lm_clear_non_def_status_block(pdev, LM_FW_SB_ID(pdev, LM_SW_LEADING_SB_ID));

    lm_status = lm_vf_recycle_resc_in_pf(pdev);
    return lm_status;
}

lm_status_t
lm_vf_recycle_resc_in_pf(struct _lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    MM_ACQUIRE_PF_LOCK(PFDEV(pdev));

    lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_sbs, pdev->params.base_fw_ndsb, pdev->params.sb_cnt);
    lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_clients, pdev->params.base_fw_client_id, pdev->params.sb_cnt);
    lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_cam_offsets, pdev->params.base_cam_offset, 1);
    if (pdev->params.base_fw_stats_id != 0xff) {
        lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_stats, pdev->params.base_fw_stats_id, 1);
    }

    MM_RELEASE_PF_LOCK(PFDEV(pdev));

    return lm_status;
}

lm_status_t
lm_vf_enable_vf(struct _lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u16_t pretend_val;
    u32_t prod_idx;
    u8_t igu_sb_id;
    u32_t was_err_num;
    u32_t was_err_value;
    u32_t was_err_reg;

    /* Enable the VF in PXP - this will enable read/write from the VF bar.
     * Need to use Pretend in order to do this. Note: once we pretend,
     * all accesses to SPLIT-68 will be done as if by the VF.
     * Pretend value layout: bits [13:10] - reserved, [9:4] - VFID, [3] - VF valid, [2:0] - PFID.
     */

    pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (ABS_VFID(pdev) << 4);
    lm_status = lm_pretend_func(PFDEV(pdev), pretend_val);
    if (lm_status == LM_STATUS_SUCCESS) {
        REG_WR(PFDEV(pdev), PBF_REG_DISABLE_VF,0);
        REG_WR(PFDEV(pdev), PGLUE_B_REG_INTERNAL_VFID_ENABLE, 1);
        lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev) );
        DbgMessage1(pdev, FATAL, "vf[%d] is enabled\n", ABS_VFID(pdev));

        was_err_num = 2 * PATH_ID(pdev) + ABS_VFID(pdev) / 32;
        switch (was_err_num) {
        case 0:
            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
            break;
        case 1:
            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
            break;
        case 2:
            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
            break;
        case 3:
            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
            break;
        default:
            was_err_reg = 0;
            DbgMessage2(pdev,FATAL,"Wrong Path[%d], VF[%d]\n",PATH_ID(pdev),ABS_VFID(pdev));
            DbgBreak();
        }

        was_err_value = 1 << (ABS_VFID(pdev) % 32);
        if (was_err_reg) {
            REG_WR(PFDEV(pdev), was_err_reg, was_err_value); /* PglueB - clear the was_error indication of the relevant function */
        }

        /* IGU Initializations */
        for (igu_sb_id = 0; igu_sb_id < LM_IGU_SB_CNT(pdev); igu_sb_id++) {
            prod_idx = (IGU_BASE_NDSB(pdev) + igu_sb_id);
            REG_WR(PFDEV(pdev), IGU_REG_PROD_CONS_MEMORY + prod_idx*4, 0);
            DbgMessage1(pdev, FATAL, "IGU[%d] is initialized\n", prod_idx);
        }
        REG_WR(PFDEV(pdev),TSEM_REG_VFPF_ERR_NUM, ABS_VFID(pdev));
        REG_WR(PFDEV(pdev),USEM_REG_VFPF_ERR_NUM, ABS_VFID(pdev));
        REG_WR(PFDEV(pdev),CSEM_REG_VFPF_ERR_NUM, ABS_VFID(pdev));
        REG_WR(PFDEV(pdev),XSEM_REG_VFPF_ERR_NUM, ABS_VFID(pdev));
    } else {
        DbgMessage2(pdev, FATAL, "lm_pretend_func(%x) returns %d\n",pretend_val,lm_status);
        DbgMessage1(pdev, FATAL, "vf[%d] is not enabled\n", ABS_VFID(pdev));
    }

    return lm_status;
}

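/*
 * Interrupt enabling is likewise done via pretend: the VF's IGU masks and
 * PBA are cleared, IGU_REG_VF_CONFIGURATION is programmed (function enable,
 * MSI/MSI-X enable, optional single-ISR mode, parent PF), and every
 * producer/consumer pair is zeroed. num_segs distinguishes the
 * backward-compatible IGU layout from the normal one.
 */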
lm_status_t
lm_vf_enable_igu_int(struct _lm_device_t * pdev)
{
    u32_t val;
    u16_t pretend_val;
    u8_t num_segs;
    u8_t prod_idx;
    u8_t sb_id;
    u8_t i;
    lm_status_t status;

    /* Need to use pretend for VF */
    pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (ABS_VFID(pdev) << 4);
    lm_pretend_func(PFDEV(pdev), pretend_val);

    REG_WR(PFDEV(pdev), IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
    REG_WR(PFDEV(pdev), IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
    REG_WR(PFDEV(pdev), IGU_REG_SB_MASK_LSB, 0);
    REG_WR(PFDEV(pdev), IGU_REG_SB_MASK_MSB, 0);
    REG_WR(PFDEV(pdev), IGU_REG_PBA_STATUS_LSB, 0);
    REG_WR(PFDEV(pdev), IGU_REG_PBA_STATUS_MSB, 0);

    val = REG_RD(PFDEV(pdev), IGU_REG_VF_CONFIGURATION);

    SET_FLAGS(val, IGU_VF_CONF_FUNC_EN);
    SET_FLAGS(val, IGU_VF_CONF_MSI_MSIX_EN);

    if (pdev->params.interrupt_mode == LM_INT_MODE_SIMD) {
        SET_FLAGS(val,IGU_VF_CONF_SINGLE_ISR_EN);
    }

    /* set Parent PF */
    val |= ((FUNC_ID(pdev) << IGU_VF_CONF_PARENT_SHIFT) & IGU_VF_CONF_PARENT_MASK);

    REG_WR(PFDEV(pdev), IGU_REG_VF_CONFIGURATION, val);

    status = lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev));

    num_segs = (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC)? IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
    for (sb_id = 0; sb_id < LM_IGU_SB_CNT(pdev); sb_id++) {
        prod_idx = (IGU_BASE_NDSB(pdev) + sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
        for (i = 0; i < num_segs; i++) {
            REG_WR(PFDEV(pdev), IGU_REG_PROD_CONS_MEMORY + (prod_idx + i)*4, 0);
        }
        /* Give Consumer updates with value '0' */
        lm_int_ack_sb_enable(pdev, sb_id);
    }

    return status;
}

lm_status_t
lm_vf_disable_igu_int(struct _lm_device_t * pdev)
{
    u32_t val;
    u16_t pretend_val;

    /* Need to use pretend for VF */
    if (lm_fl_reset_is_inprogress(PFDEV(pdev))) {
        DbgMessage2(pdev, FATAL, "PF[%d] of VF[%d] is under FLR\n", FUNC_ID(pdev), ABS_VFID(pdev));
        return LM_STATUS_SUCCESS;
    }
    pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (ABS_VFID(pdev) << 4);
    lm_pretend_func(PFDEV(pdev), pretend_val);

    val = REG_RD(PFDEV(pdev), IGU_REG_VF_CONFIGURATION);

    /* disable both bits, for INTA, MSI and MSI-X. */
    RESET_FLAGS(val, (IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK));

    REG_WR(PFDEV(pdev), IGU_REG_VF_CONFIGURATION, val);

    return (lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev)));
}

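/*
 * FLR state is tracked in the parent PF's flred_vfs bitmap, one bit per
 * relative VFID, always manipulated under the PF lock; the set/clear/query
 * helpers below reuse the same acquire/release primitives as the resource
 * bitmaps above.
 */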
void lm_vf_fl_reset_set_inprogress(struct _lm_device_t * pdev)
{
    MM_ACQUIRE_PF_LOCK(PFDEV(pdev));
    lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.flred_vfs, REL_VFID(pdev), 1);
    DbgMessage2(pdev, FATAL, "Set FLR flag for VF[%d(%d)]\n", ABS_VFID(pdev), REL_VFID(pdev));
    MM_RELEASE_PF_LOCK(PFDEV(pdev));
}

void lm_vf_fl_reset_clear_inprogress(struct _lm_device_t *pdev)
{
    MM_ACQUIRE_PF_LOCK(PFDEV(pdev));
    lm_vf_release_resource(PFDEV(pdev)->pf_resources.flred_vfs, REL_VFID(pdev), 1);
    DbgMessage2(pdev, FATAL, "Clear FLR flag for VF[%d(%d)]\n", ABS_VFID(pdev), REL_VFID(pdev));
    MM_RELEASE_PF_LOCK(PFDEV(pdev));
}

u8_t lm_vf_fl_reset_is_inprogress(struct _lm_device_t *pdev)
{
    u8_t vf_flr_in_progress;
    MM_ACQUIRE_PF_LOCK(PFDEV(pdev));
    vf_flr_in_progress = lm_vf_get_resource_value(PFDEV(pdev)->pf_resources.flred_vfs, REL_VFID(pdev));
    if (vf_flr_in_progress) {
        DbgMessage2(pdev, FATAL, "VF[%d(%d)] is FLRed\n", ABS_VFID(pdev), REL_VFID(pdev));
    }
    MM_RELEASE_PF_LOCK(PFDEV(pdev));
    return vf_flr_in_progress;
}

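/*
 * The remaining entry points are not used in basic-VF mode; they appear to
 * belong to the PF/VF channel interface of other virtualization modes and
 * are stubbed here to fail loudly if ever reached.
 */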
u16_t lm_vf_pf_get_sb_running_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
{
    DbgMessage(NULL, FATAL, "lm_vf_pf_get_sb_running_index is not used in basic VF\n");
    DbgBreak();
    return 0;
}

u16_t lm_vf_pf_get_sb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx)
{
    DbgMessage(NULL, FATAL, "lm_vf_pf_get_sb_index is not used in basic VF\n");
    DbgBreak();
    return 0;
}

u16_t lm_vf_get_doorbell_size(struct _lm_device_t *pdev)
{
    DbgMessage(NULL, FATAL, "lm_vf_get_doorbell_size is not used in basic VF\n");
    DbgBreak();
    return 0;
}

lm_status_t lm_vf_pf_set_q_filters(struct _lm_device_t * pdev, u8 vf_qid, u8_t to_indicate, q_filter_type filter_type, u8_t * pbuf, u32_t buf_len, u16_t vlan_tag, u8_t set_mac)
{
    DbgMessage(NULL, FATAL, "lm_vf_pf_set_q_filters is not used in basic VF\n");
    DbgBreak();
    return LM_STATUS_FAILURE;
}

lm_status_t lm_vf_pf_set_q_filters_list(struct _lm_device_t * pdev, u8 vf_qid, u8_t to_indicate, q_filter_type filter_type, d_list_t * pbuf, u16_t vlan_tag, u8_t set_mac)
{
    DbgMessage(NULL, FATAL, "lm_vf_pf_set_q_filters_list is not used in basic VF\n");
    DbgBreak();
    return LM_STATUS_FAILURE;
}

lm_status_t lm_vf_pf_tear_q_down(struct _lm_device_t * pdev, u8 vf_qid)
{
    DbgMessage(NULL, FATAL, "lm_vf_pf_tear_q_down is not used in basic VF\n");
    DbgBreak();
    return LM_STATUS_FAILURE;
}

lm_status_t lm_vf_queue_init(struct _lm_device_t *pdev, u8_t cid)
{
    DbgMessage(NULL, FATAL, "lm_vf_queue_init is not used in basic VF\n");
    DbgBreak();
    return LM_STATUS_FAILURE;
}

lm_status_t lm_vf_queue_close(struct _lm_device_t *pdev, u8_t cid)
{
    DbgMessage(NULL, FATAL, "lm_vf_queue_close is not used in basic VF\n");
    DbgBreak();
    return LM_STATUS_FAILURE;
}

#endif /* VF_INVOLVED */