xref: /titanic_44/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/vf/channel_vf/lm_vf.c (revision d14abf155341d55053c76eeec58b787a456b753b)
1 #ifdef VF_INVOLVED
2 
3 #include "lm5710.h"
4 #include "bd_chain.h"
5 #include "577xx_int_offsets.h"
6 #include "context.h"
7 #include "command.h"
8 
9 extern void lm_int_igu_ack_sb(lm_device_t *pdev, u8_t rss_id, u8_t storm_id, u16_t sb_index, u8_t int_op, u8_t is_update_idx);
10 //#define LM_VF_PM_MESS_STATE_READY_TO_SEND       0
11 //#define LM_VF_PM_MESS_STATE_SENT                1
12 
13 /**********************VF_PF FUNCTIONS**************************************/
14 /**
15  * Function send a message over the pf/vf channel, first writes the message low/high addr
16  * and then writes to the "addr-valid" in the trigger-zone... this causes the FW to wake
17  * up and handle the message.
18  *
19  * @param pdev
20  * @param mess
21  *
22  * @return lm_status_t
23  */
24 
lm_vf_is_lamac_restricted(struct _lm_device_t * pdev)25 u8_t lm_vf_is_lamac_restricted(struct _lm_device_t *pdev)
26 {
27     return (pdev->vars.is_pf_provides_mac && (pdev->vars.is_pf_restricts_lamac || pdev->vars.is_pf_rejected_lamac));
28 }
29 
lm_vf_check_mac_restriction(struct _lm_device_t * pdev,struct pfvf_acquire_resp_tlv * pf_resp)30 static u8_t lm_vf_check_mac_restriction(struct _lm_device_t *pdev, struct pfvf_acquire_resp_tlv *pf_resp)
31 {
32     return (!(pf_resp->pfdev_info.pf_cap | PFVF_CAP_ALLOW_MAC));
33 }
34 
lm_pf_get_queues_number(struct _lm_device_t * pdev,lm_vf_info_t * vf_info,u8_t * num_rxqs,u8_t * num_txqs)35 static lm_status_t lm_pf_get_queues_number(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t *num_rxqs, u8_t * num_txqs)
36 {
37     return mm_pf_get_queues_number(pdev, vf_info, num_rxqs, num_txqs);
38 }
39 
lm_pf_get_filters_number(struct _lm_device_t * pdev,lm_vf_info_t * vf_info,u8_t * num_mac_filters,u8_t * num_vlan_filters,u8_t * num_mc_filters)40 static lm_status_t lm_pf_get_filters_number(struct _lm_device_t *pdev, lm_vf_info_t *vf_info,
41                                                 u8_t *num_mac_filters,
42                                                 u8_t *num_vlan_filters,
43                                                 u8_t *num_mc_filters)
44 {
45     return mm_pf_get_filters_number(pdev, vf_info, num_mac_filters, num_vlan_filters, num_mc_filters);
46 }
47 
lm_pf_get_macs(struct _lm_device_t * pdev,lm_vf_info_t * vf_info,u8_t * permanent_mac_addr,u8_t * current_mac_addr)48 static lm_status_t lm_pf_get_macs(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t *permanent_mac_addr, u8_t *current_mac_addr)
49 {
50     return mm_pf_get_macs(pdev, vf_info, permanent_mac_addr, current_mac_addr);
51 }
52 
lm_pf_vf_check_compatibility(struct _lm_device_t * pdev,lm_vf_info_t * vf_info,struct vf_pf_msg_acquire * request)53 static u8 lm_pf_vf_check_compatibility(struct _lm_device_t *pdev,
54                                                 lm_vf_info_t *vf_info,
55                                                 struct vf_pf_msg_acquire *request)
56 {
57     u8 status = SW_PFVF_STATUS_SUCCESS;
58     if( 0 == request->vfdev_info.vf_fw_hsi_version )
59     {
60         // here we handle cases where HSI version of PF is not compatible with HSI version of VF
61         // Until this code section was added, VF always returned 0 so we fail request for old VF's
62         // Currenly (22/9/2011) we consider all VF that return ANY value (not 0) as valid
63         // once HSI will change, we'll need to enter here logic that will say:
64         // if( ( 0 == vf_fw_hsi_version) || ( some condition with vf_fw_hsi_version )
65         status  = SW_PFVF_STATUS_MISMATCH_FW_HSI;
66     }
67     else
68     {
69         #define FW_REV_INTERFACE_SUPPORTED     0x07084b00 // 7.8.75.0
70 
71         if (request->vfdev_info.vf_fw_hsi_version >= FW_REV_INTERFACE_SUPPORTED)
72         {
73             vf_info->fp_hsi_ver = request->vfdev_info.fp_hsi_ver;
74         }
75         else
76         {
77             vf_info->fp_hsi_ver = 0;
78         }
79     }
80     if (vf_info->fp_hsi_ver > ETH_FP_HSI_VERSION)
81     {
82         /* VF FP HSI VER is newer than PF... treat as mismatch */
83         status  = SW_PFVF_STATUS_MISMATCH_FW_HSI;
84     }
85 
86     if (!(request->vfdev_info.vf_aux & SW_VFPF_VFDEF_INFO_AUX_DIRECT_DQ))
87     {
88         status  = SW_PFVF_STATUS_MISMATCH_FW_HSI;
89     }
90 
91     return status;
92 }
93 
lm_pf_vf_fill_acquire_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)94 static lm_status_t lm_pf_vf_fill_acquire_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
95 {
96     lm_status_t lm_status = LM_STATUS_SUCCESS;
97     struct vf_pf_msg_acquire*      request            = NULL;
98     struct pf_vf_msg_acquire_resp* response           = NULL;
99     u8_t                           i                  = 0;
100     u8_t                           num_mac_filters    = 0;
101     u8_t                           num_vlan_filters   = 0;
102     u8_t                           num_mc_filters     = 0;
103     u8_t                           status;
104 
105     DbgBreakIf(!(pdev && vf_info && vf_info->pf_vf_response.request_virt_addr && vf_info->pf_vf_response.response_virt_addr));
106 
107     request = vf_info->pf_vf_response.request_virt_addr;
108     response = vf_info->pf_vf_response.response_virt_addr;
109 
110     status = lm_pf_vf_check_compatibility(pdev, vf_info, request);
111     if (status != SW_PFVF_STATUS_SUCCESS)
112     {
113         response->hdr.status = status;
114         return lm_status;
115     }
116 
117     response->pfdev_info.chip_num = pdev->hw_info.chip_id;//CHIP_NUM(pdev);
118     response->pfdev_info.pf_cap               = PFVF_CAP_DHC | PFVF_CAP_TPA;
119     if (pdev->params.debug_sriov_vfs)
120     {
121         response->pfdev_info.pf_cap |= PFVF_DEBUG;
122     }
123     response->pfdev_info.db_size              = LM_VF_DQ_CID_SIZE;
124     response->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
125     vf_info->num_vf_chains_requested = request->resc_request.num_sbs;
126     vf_info->num_sbs = response->resc.num_sbs = min (vf_info->num_allocated_chains, request->resc_request.num_sbs);
127     response->resc.igu_cnt = vf_info->num_sbs;
128 
129     for (i = 0; i < response->resc.num_sbs; i++)
130     {
131         response->resc.hw_sbs[i].hw_sb_id = LM_VF_IGU_SB_ID(vf_info, i);
132         response->resc.hw_sbs[i].sb_qid = LM_FW_VF_DHC_QZONE_ID(vf_info, i);
133         response->resc.hw_qid[i] = LM_FW_VF_QZONE_ID(vf_info, i);
134     }
135 
136     if (response->resc.num_sbs < vf_info->num_allocated_chains)
137     {
138         for (i = response->resc.num_sbs; i < vf_info->num_allocated_chains; i++)
139         {
140             lm_pf_release_vf_igu_block(pdev, vf_info->vf_chains[i].igu_sb_id);
141             lm_pf_release_separate_vf_chain_resources(pdev, vf_info->relative_vf_id, i);
142         }
143 #ifdef _VBD_
144         //Generate message
145 #endif
146         vf_info->num_allocated_chains = response->resc.num_sbs;
147     }
148 
149     vf_info->num_rxqs = response->resc.num_rxqs = min(vf_info->num_sbs, request->resc_request.num_rxqs);
150     vf_info->num_txqs = response->resc.num_txqs = min(vf_info->num_sbs, request->resc_request.num_txqs);
151     vf_info->num_rxqs = response->resc.num_rxqs = min(vf_info->num_rxqs, response->resc.num_sbs);
152     vf_info->num_txqs = response->resc.num_txqs = min(vf_info->num_txqs, response->resc.num_sbs);
153 
154     lm_pf_get_filters_number(pdev,vf_info,
155                              &num_mac_filters,
156                              &num_vlan_filters,
157                              &num_mc_filters);
158 
159     vf_info->num_mac_filters = response->resc.num_mac_filters = min(num_mac_filters, request->resc_request.num_mac_filters);
160     vf_info->num_vlan_filters = response->resc.num_vlan_filters = min(num_vlan_filters, request->resc_request.num_vlan_filters);
161     vf_info->num_mc_filters = response->resc.num_mc_filters = min(num_mc_filters, request->resc_request.num_mc_filters);
162 
163     lm_pf_get_macs(pdev,vf_info, response->resc.permanent_mac_addr, response->resc.current_mac_addr);
164 //#ifdef UPDATED_MAC
165     if (pdev->params.sriov_inc_mac)
166     {
167         u8_t mac_addition = (u8_t)pdev->params.sriov_inc_mac;
168         response->resc.current_mac_addr[5] += mac_addition;
169     }
170 //#endif
171     response->hdr.status = SW_PFVF_STATUS_SUCCESS;
172     vf_info->vf_si_state = PF_SI_ACQUIRED;
173     return lm_status;
174 }
175 
lm_pf_vf_fill_init_vf_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)176 static lm_status_t lm_pf_vf_fill_init_vf_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
177 {
178     lm_status_t lm_status = LM_STATUS_SUCCESS;
179     struct vf_pf_msg_init_vf * request = NULL;
180     struct pf_vf_msg_resp * response = NULL;
181     u8_t sb_idx = 0;
182     u8_t q_idx = 0;
183     u8_t function_fw_id;
184     u32_t i;
185 
186     DbgBreakIf(!(pdev && vf_info && vf_info->pf_vf_response.request_virt_addr && vf_info->pf_vf_response.response_virt_addr));
187 //    DbgBreak();
188     request = vf_info->pf_vf_response.request_virt_addr;
189     response = vf_info->pf_vf_response.response_virt_addr;
190 
191     //lm_status = lm_pf_enable_vf(pdev, vf_info->abs_vf_id);
192 
193     MM_ACQUIRE_VFS_STATS_LOCK(pdev);
194     DbgBreakIf(vf_info->vf_stats.vf_stats_state != VF_STATS_NONE);
195     vf_info->vf_stats.vf_fw_stats_phys_data.as_u64 = request->stats_addr;
196     vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
197     vf_info->vf_stats.stop_collect_stats = TRUE;
198     vf_info->vf_stats.vf_stats_flag = 0;
199     vf_info->vf_stats.vf_stats_cnt = 0;
200     vf_info->vf_stats.vf_exracted_stats_cnt = 0;
201     MM_RELEASE_VFS_STATS_LOCK(pdev);
202 
203     for (sb_idx = 0; sb_idx < vf_info->num_sbs; sb_idx++) {
204         lm_pf_init_vf_non_def_sb(pdev, vf_info, sb_idx, request->sb_addr[sb_idx]);
205     }
206 
207     DbgBreakIf((XSTORM_SPQ_DATA_SIZE % 4) != 0);
208     for (i = 0; i < XSTORM_SPQ_DATA_SIZE/sizeof(u32_t); i++) {
209         REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + XSTORM_VF_SPQ_DATA_OFFSET(vf_info->abs_vf_id) + i*sizeof(u32_t),0);
210     }
211 
212     REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(vf_info->abs_vf_id)),0);
213     REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(vf_info->abs_vf_id)) + 4,0);
214     REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PROD_OFFSET(vf_info->abs_vf_id)),0);
215 
216     for (q_idx = 0; q_idx < vf_info->num_rxqs; q_idx++) {
217         u32_t reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + LM_FW_VF_QZONE_ID(vf_info,q_idx) * 4;
218         u32_t val = vf_info->abs_vf_id | (1 << 6);
219         REG_WR(PFDEV(pdev), reg, val);
220 
221     }
222     /*lm_status = lm_set_rx_mask(pdev, LM_CLI_IDX_NDIS, LM_RX_MASK_ACCEPT_NONE);
223     if(LM_STATUS_SUCCESS != lm_status)
224     {
225         DbgMessage(pdev, FATAL, "lm_set_rx_mask(LM_RX_MASK_ACCEPT_NONE) returns %d\n",lm_status);
226         return lm_status;
227     }*/
228 /*
229 Enable the function in STORMs
230 */
231     function_fw_id = 8 + vf_info->abs_vf_id;
232 
233     LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_XSTRORM_INTMEM);
234     LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_CSTRORM_INTMEM);
235     LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_TSTRORM_INTMEM);
236     LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_USTRORM_INTMEM);
237 
238     LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_XSTRORM_INTMEM);
239     LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_CSTRORM_INTMEM);
240     LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_TSTRORM_INTMEM);
241     LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_FUNC_EN_OFFSET(function_fw_id), 1, BAR_USTRORM_INTMEM);
242 
243     lm_status = lm_pf_enable_vf_igu_int(pdev, vf_info->abs_vf_id);
244 
245     if (lm_status == LM_STATUS_SUCCESS) {
246         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
247         vf_info->vf_si_state = PF_SI_VF_INITIALIZED;
248     } else {
249         response->hdr.status = SW_PFVF_STATUS_FAILURE;
250         DbgBreak();
251     }
252     return lm_status;
253 }
254 
255 
lm_pf_vf_fill_setup_q_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)256 static lm_status_t lm_pf_vf_fill_setup_q_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
257 {
258     lm_status_t lm_status = LM_STATUS_SUCCESS;
259     struct vf_pf_msg_setup_q * request = NULL;
260     struct pf_vf_msg_resp * response = NULL;
261     struct sw_vf_pf_rxq_params * rxq_params = NULL;
262     struct sw_vf_pf_txq_params * txq_params = NULL;
263 //    lm_rcq_chain_t * rcq_chain = NULL;
264     u8_t    cmd_id        = 0;
265     u8_t    type          = 0;
266     u8_t    q_id          = 0;
267     u8_t    valid         = 0;
268     u32_t   vf_cid_of_pf  = 0;
269 
270     DbgBreakIf(!(pdev && vf_info && vf_info->pf_vf_response.request_virt_addr && vf_info->pf_vf_response.response_virt_addr));
271 
272     request = vf_info->pf_vf_response.request_virt_addr;
273     response = vf_info->pf_vf_response.response_virt_addr;
274     q_id = request->vf_qid;
275     valid = request->param_valid;
276 
277 
278     if (request->param_valid & VFPF_RXQ_VALID) {
279         u32_t mem_size = sizeof(struct tpa_update_ramrod_data);
280         rxq_params = &request->rxq;
281         vf_info->vf_chains[q_id].mtu = rxq_params->mtu;
282         if (rxq_params->flags & SW_VFPF_QUEUE_FLG_TPA) {
283             DbgBreakIf(rxq_params->sge_addr == 0);
284             vf_info->vf_chains[q_id].sge_addr = rxq_params->sge_addr;
285             vf_info->vf_chains[q_id].tpa_ramrod_data_virt = mm_alloc_phys_mem(pdev, mem_size, &vf_info->vf_chains[q_id].tpa_ramrod_data_phys, 0, LM_RESOURCE_NDIS);
286 
287             if(CHK_NULL(vf_info->vf_chains[q_id].tpa_ramrod_data_virt))
288             {
289                 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
290                 response->hdr.status = SW_PFVF_STATUS_FAILURE;
291                 return LM_STATUS_RESOURCE ;
292             }
293             mm_mem_zero((void *)vf_info->vf_chains[q_id].tpa_ramrod_data_virt, mem_size);
294     }
295     }
296     if (request->param_valid & VFPF_TXQ_VALID) {
297         txq_params = &request->txq;
298     }
299 
300     lm_status = lm_pf_init_vf_client_init_data(pdev, vf_info, q_id, rxq_params, txq_params);
301     if (lm_status == LM_STATUS_SUCCESS) {
302         vf_cid_of_pf = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_id);
303         lm_init_connection_context(pdev, vf_cid_of_pf, 0);
304         cmd_id = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
305         type = (ETH_CONNECTION_TYPE | ((8 + vf_info->abs_vf_id) << SPE_HDR_T_FUNCTION_ID_SHIFT));
306         lm_set_con_state(pdev, vf_cid_of_pf, LM_CON_STATE_OPEN_SENT);
307 
308         lm_sq_post(pdev,
309                    vf_cid_of_pf,
310                    cmd_id,
311                    CMD_PRIORITY_MEDIUM,
312                    type,
313                    pdev->client_info[LM_SW_VF_CLI_ID(vf_info,q_id)].client_init_data_phys.as_u64);
314 
315         lm_status = lm_eth_wait_state_change(pdev, LM_CON_STATE_OPEN, vf_cid_of_pf);
316 
317     }
318 
319     if (lm_status == LM_STATUS_SUCCESS) {
320         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
321         mm_atomic_inc(&vf_info->vf_si_num_of_active_q);
322         if (q_id == 0) {
323             MM_ACQUIRE_VFS_STATS_LOCK(pdev);
324             DbgBreakIf(vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_READY)
325             vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_SUBMITTED;
326             vf_info->vf_stats.stop_collect_stats = FALSE;
327             if (!vf_info->vf_stats.do_not_collect_pf_stats) {
328                 vf_info->vf_stats.vf_stats_flag = VF_STATS_COLLECT_FW_STATS_FOR_PF;
329             }
330             if (vf_info->vf_stats.vf_fw_stats_phys_data.as_u64) {
331                 vf_info->vf_stats.vf_stats_flag |= VF_STATS_COLLECT_FW_STATS_FOR_VF;
332             }
333             MM_RELEASE_VFS_STATS_LOCK(pdev);
334         }
335     } else if (lm_status == LM_STATUS_PENDING) {
336         response->hdr.status = SW_PFVF_STATUS_WAITING;
337     } else {
338         response->hdr.status = SW_PFVF_STATUS_FAILURE;
339     }
340 
341     return lm_status;
342 }
343 
344 // ASSUMPTION: CALLED IN PASSIVE LEVEL!!!
345 
lm_pf_vf_fill_set_q_filters_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)346 static lm_status_t lm_pf_vf_fill_set_q_filters_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
347 {
348     lm_status_t lm_status = LM_STATUS_SUCCESS;
349     struct vf_pf_msg_set_q_filters * request = NULL;
350     struct pf_vf_msg_resp * response = NULL;
351     lm_rx_mask_t    rx_mask = 0;
352 
353     request = vf_info->pf_vf_response.request_virt_addr;
354     response = vf_info->pf_vf_response.response_virt_addr;
355 
356  //   DbgBreak();
357     if (request->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
358         if (VFPF_RX_MASK_ACCEPT_NONE == request->rx_mask) {
359             lm_status = lm_set_rx_mask(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid), LM_RX_MASK_ACCEPT_NONE, NULL);
360             if (lm_status == LM_STATUS_PENDING)
361             {
362                 lm_status = lm_wait_set_rx_mask_done(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid));
363             }
364         } else {
365             if (GET_FLAGS(request->rx_mask,VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST | VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST |
366                            VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST) ==
367                            (VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST | VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST |
368                            VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST)) {
369                 if (!vf_info->is_promiscuous_mode_restricted)
370                 {
371                     rx_mask = LM_RX_MASK_PROMISCUOUS_MODE;
372                 lm_status = lm_set_rx_mask(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid), LM_RX_MASK_PROMISCUOUS_MODE, NULL);
373                 if (lm_status == LM_STATUS_PENDING)
374                 {
375                     lm_status = lm_wait_set_rx_mask_done(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid));
376                 }
377                 }
378                 else
379                 {
380                     request->rx_mask &= ~(VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_ALL_MULTICAST);
381                 }
382             }
383 
384             if (!rx_mask)
385             {
386                 if (GET_FLAGS(request->rx_mask,VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)) {
387                     rx_mask |= LM_RX_MASK_ACCEPT_UNICAST;
388                 }
389                 if (GET_FLAGS(request->rx_mask,VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)) {
390                     rx_mask |= LM_RX_MASK_ACCEPT_MULTICAST;
391                 }
392                 if (GET_FLAGS(request->rx_mask,VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)) {
393                     rx_mask |= LM_RX_MASK_ACCEPT_ALL_MULTICAST;
394                 }
395                 if (GET_FLAGS(request->rx_mask, VFPF_RX_MASK_ACCEPT_BROADCAST)) {
396                     rx_mask |= LM_RX_MASK_ACCEPT_BROADCAST;
397                 }
398                 lm_status = lm_set_rx_mask(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid), rx_mask, NULL);
399                 if (lm_status == LM_STATUS_PENDING)
400                 {
401                     lm_status = lm_wait_set_rx_mask_done(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid));
402                 }
403             }
404         }
405     }
406     if (request->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
407         u8_t mac_idx;
408         u8_t set_mac;
409         for (mac_idx = 0; mac_idx < request->n_mac_vlan_filters; mac_idx++) {
410             if (request->filters[mac_idx].flags & VFPF_Q_FILTER_DEST_MAC_PRESENT) {
411                 if (request->filters[mac_idx].flags & VFPF_Q_FILTER_SET_MAC) {
412                     set_mac = TRUE;
413                 } else {
414                     set_mac = FALSE;
415                 }
416                 lm_status = lm_set_mac_addr(pdev, request->filters[mac_idx].dest_mac,
417                                             LM_SET_CAM_NO_VLAN_FILTER, LM_SW_VF_CLI_ID(vf_info,request->vf_qid), NULL, set_mac, 0);
418                 if (lm_status == LM_STATUS_PENDING) {
419                     lm_status = lm_wait_set_mac_done(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid));
420                 }
421             } else {
422                 //
423             }
424         }
425     }
426     if (lm_status == LM_STATUS_SUCCESS) {
427         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
428     } else if (lm_status == LM_STATUS_PENDING) {
429         DbgBreak();
430         response->hdr.status = SW_PFVF_STATUS_WAITING;
431     } else {
432         response->hdr.status = SW_PFVF_STATUS_FAILURE;
433     }
434     return lm_status;
435 }
436 
lm_pf_vf_fill_teardown_q_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)437 static lm_status_t lm_pf_vf_fill_teardown_q_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
438 {
439     lm_status_t lm_status = LM_STATUS_FAILURE;
440     struct vf_pf_msg_q_op * request = NULL;
441     struct pf_vf_msg_resp * response = NULL;
442     u8_t    q_id          = 0;
443     u32_t cid;
444 
445     //DbgBreak();
446     request = vf_info->pf_vf_response.request_virt_addr;
447     response = vf_info->pf_vf_response.response_virt_addr;
448     q_id = request->vf_qid;
449 
450     if (q_id == 0) {
451         MM_ACQUIRE_VFS_STATS_LOCK(pdev);
452         if (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_IN_PROCESSING) {
453             vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
454         }
455         vf_info->vf_stats.stop_collect_stats = TRUE;
456         vf_info->vf_stats.vf_stats_flag = 0;
457         MM_RELEASE_VFS_STATS_LOCK(pdev);
458         DbgMessage(pdev, WARN, "lm_pf_vf_fill_teardown_q_response for VF[%d]: stats_cnt: %d\n",vf_info->relative_vf_id,vf_info->vf_stats.vf_stats_cnt);
459 
460         lm_status = lm_pf_vf_wait_for_stats_ready(pdev, vf_info);
461         DbgMessage(pdev, WARN, "lm_pf_vf_fill_teardown_q_response for VF[%d]: stats_cnt: %d\n",vf_info->relative_vf_id,vf_info->vf_stats.vf_stats_cnt);
462         if (lm_status != LM_STATUS_SUCCESS) {
463             if (lm_status != LM_STATUS_ABORTED)
464             {
465             DbgBreak();
466         }
467             response->hdr.status = SW_PFVF_STATUS_FAILURE;
468             return lm_status;
469     }
470     }
471 
472     cid = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_id);
473 
474 
475     if (vf_info->was_malicious || vf_info->was_flred)
476     {
477         lm_status = LM_STATUS_SUCCESS;
478 	lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
479     }
480     else
481     {
482 	lm_status = lm_close_eth_con(pdev, cid, TRUE);
483     }
484 
485     if (lm_status == LM_STATUS_SUCCESS) {
486         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
487         mm_atomic_dec(&vf_info->vf_si_num_of_active_q);
488     } else if (lm_status == LM_STATUS_PENDING) {
489         DbgBreak();
490         response->hdr.status = SW_PFVF_STATUS_WAITING;
491     } else {
492         response->hdr.status = SW_PFVF_STATUS_FAILURE;
493     }
494     return lm_status;
495 }
496 
lm_pf_vf_fill_close_vf_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)497 static lm_status_t lm_pf_vf_fill_close_vf_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
498 {
499     lm_status_t lm_status = LM_STATUS_SUCCESS;
500     u8_t function_fw_id;
501     u8_t sb_idx;
502     u8_t q_idx;
503     struct vf_pf_msg_close_vf * request = NULL;
504     struct pf_vf_msg_resp * response = NULL;
505     u32_t cid;
506 
507     //DbgBreak();
508     request = vf_info->pf_vf_response.request_virt_addr;
509     response = vf_info->pf_vf_response.response_virt_addr;
510 
511     MM_ACQUIRE_VFS_STATS_LOCK(pdev);
512     if (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_IN_PROCESSING) {
513         vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
514     }
515     vf_info->vf_stats.stop_collect_stats = TRUE;
516     vf_info->vf_stats.vf_stats_flag = 0;
517     MM_RELEASE_VFS_STATS_LOCK(pdev);
518 
519     lm_status = lm_pf_vf_wait_for_stats_ready(pdev, vf_info);
520     if (lm_status != LM_STATUS_SUCCESS) {
521         DbgBreak();
522     } else {
523         vf_info->vf_stats.vf_stats_state = VF_STATS_NONE;
524     }
525 
526     for (q_idx = 0; q_idx < vf_info->vf_si_num_of_active_q; q_idx++) {
527         cid = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_idx);
528 	if (vf_info->was_malicious || vf_info->was_flred)
529 	{
530 	    lm_status = LM_STATUS_SUCCESS;
531 	    lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
532 	}
533 	else
534 	{
535 	    lm_status = lm_close_eth_con(pdev, cid, TRUE);
536 	}
537     }
538     vf_info->vf_si_num_of_active_q = 0;
539 
540     lm_pf_disable_vf_igu_int(pdev, vf_info->abs_vf_id);
541     /*
542     Disable the function in STORMs
543     */
544     function_fw_id = 8 + vf_info->abs_vf_id;
545 
546     LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_XSTRORM_INTMEM);
547     LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_CSTRORM_INTMEM);
548     LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_TSTRORM_INTMEM);
549     LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_USTRORM_INTMEM);
550 
551     for (sb_idx = 0; sb_idx < vf_info->num_sbs; sb_idx++) {
552         lm_clear_non_def_status_block(pdev,  LM_FW_VF_SB_ID(vf_info, sb_idx));
553     }
554 
555     for (q_idx = 0; q_idx < vf_info->num_rxqs; q_idx++) {
556         u32_t reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + LM_FW_VF_QZONE_ID(vf_info,q_idx) * 4;
557         u32_t val = 0;
558         REG_WR(PFDEV(pdev), reg, val);
559     }
560 
561     vf_info->vf_si_state = PF_SI_ACQUIRED;
562     if (lm_status == LM_STATUS_SUCCESS) {
563         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
564     } else if (lm_status == LM_STATUS_PENDING) {
565         DbgBreak();
566         response->hdr.status = SW_PFVF_STATUS_WAITING;
567     } else {
568         response->hdr.status = SW_PFVF_STATUS_FAILURE;
569     }
570     return lm_status;
571 }
572 
lm_pf_vf_fill_release_vf_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)573 static lm_status_t lm_pf_vf_fill_release_vf_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
574 {
575     struct pf_vf_msg_resp * response = NULL;
576     lm_status_t lm_status = LM_STATUS_SUCCESS;
577 
578     response = vf_info->pf_vf_response.response_virt_addr;
579     response->hdr.status = SW_PFVF_STATUS_SUCCESS;
580     vf_info->vf_si_state = PF_SI_WAIT_FOR_ACQUIRING_REQUEST;
581 
582     return lm_status;
583 }
584 
585 
lm_pf_vf_fill_update_rss_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)586 static lm_status_t lm_pf_vf_fill_update_rss_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
587 {
588     struct pf_vf_msg_resp * response = NULL;
589     struct vf_pf_msg_rss * request = NULL;
590     struct ecore_config_rss_params * rss_params = NULL;
591     lm_status_t lm_status = LM_STATUS_SUCCESS;
592     u8_t    ind_table_size;
593     u8_t    ind_table_idx;
594 
595  //   DbgBreak();
596     request = vf_info->pf_vf_response.request_virt_addr;
597     response = vf_info->pf_vf_response.response_virt_addr;
598     rss_params = &vf_info->vf_slowpath_info.rss_params;
599     mm_mem_zero(rss_params, sizeof(struct ecore_config_rss_params));
600     ECORE_SET_BIT(RAMROD_COMP_WAIT, &rss_params->ramrod_flags);
601     rss_params->rss_flags = request->rss_flags;
602     rss_params->rss_result_mask = request->rss_result_mask;
603     mm_memcpy(rss_params->rss_key, request->rss_key, sizeof(u32_t) * 10);
604 
605     ind_table_size = request->rss_result_mask + 1;
606     for (ind_table_idx = 0; ind_table_idx < ind_table_size; ind_table_idx++) {
607         rss_params->ind_table[ind_table_idx] = LM_FW_VF_CLI_ID(vf_info, request->ind_table[ind_table_idx]);
608     }
609     rss_params->rss_obj = &vf_info->vf_slowpath_info.rss_conf_obj;
610     lm_status = ecore_config_rss(pdev, rss_params);
611     if (lm_status == LM_STATUS_SUCCESS) {
612         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
613     } else {
614         response->hdr.status = SW_PFVF_STATUS_FAILURE;
615     }
616 
617     return lm_status;
618 }
619 
lm_pf_vf_fill_update_rsc_response(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)620 lm_status_t lm_pf_vf_fill_update_rsc_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
621 {
622     struct pf_vf_msg_resp * response = NULL;
623     struct vf_pf_msg_rsc * request = NULL;
624     lm_status_t lm_status = LM_STATUS_SUCCESS;
625     u32_t   q_idx;
626 
627     //DbgBreak();
628     request = vf_info->pf_vf_response.request_virt_addr;
629     response = vf_info->pf_vf_response.response_virt_addr;
630 
631     vf_info->vf_tpa_info.ramrod_recv_cnt = vf_info->vf_si_num_of_active_q;
632     for (q_idx = 0; q_idx < vf_info->vf_si_num_of_active_q; q_idx++) {
633         lm_status = lm_pf_tpa_send_vf_ramrod(pdev, vf_info, q_idx, (u8_t)request->rsc_ipv4_state, (u8_t)request->rsc_ipv6_state);
634 
635         if(LM_STATUS_SUCCESS != lm_status)
636         {
637             DbgBreakMsg(" Ramrod send failed ");
638             break;
639         }
640     }
641     lm_status = lm_wait_state_change(pdev, &vf_info->vf_tpa_info.ramrod_recv_cnt, 0);
642     if (lm_status == LM_STATUS_SUCCESS) {
643         response->hdr.status = SW_PFVF_STATUS_SUCCESS;
644     } else {
645         response->hdr.status = SW_PFVF_STATUS_FAILURE;
646     }
647     return lm_status;
648 }
649 
lm_pf_process_standard_request(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)650 lm_status_t lm_pf_process_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
651 {
652     lm_status_t lm_status = LM_STATUS_SUCCESS;
653     struct vf_pf_msg_hdr * requst_hdr = vf_info->pf_vf_response.request_virt_addr;
654     struct pf_vf_msg_hdr * resp_hdr = vf_info->pf_vf_response.response_virt_addr;
655 
656     DbgBreakIf(!(pdev && IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) && vf_info && (vf_info->pf_vf_response.req_resp_state == VF_PF_REQUEST_IN_PROCESSING)));
657     DbgMessage(pdev, WARNvf, "lm_pf_process_standard_request %d for VF[%d]\n",requst_hdr->opcode,vf_info->relative_vf_id);
658 
659     resp_hdr->opcode = requst_hdr->opcode;
660     resp_hdr->status = SW_PFVF_STATUS_WAITING;
661     vf_info->pf_vf_response.response_size = sizeof(struct pf_vf_msg_hdr);
662     vf_info->pf_vf_response.response_offset = 0;
663 
664     // Check PF/VF interface
665     if ( PFVF_IF_VERSION != requst_hdr->if_ver )
666     {
667         resp_hdr->status                       = SW_PFVF_STATUS_MISMATCH_PF_VF_VERSION;
668         vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
669     }
670     else
671     {
672         switch (requst_hdr->opcode)
673         {
674         case PFVF_OP_ACQUIRE:
675             resp_hdr->opcode_ver = PFVF_ACQUIRE_VER;
676             if (vf_info->vf_si_state != PF_SI_WAIT_FOR_ACQUIRING_REQUEST)
677             {
678                 resp_hdr->status = SW_PFVF_STATUS_FAILURE;
679                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
680                 break;
681             }
682             if (PFVF_ACQUIRE_VER != requst_hdr->opcode_ver)
683             {
684                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
685                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
686                 break;
687             }
688             lm_status = lm_pf_vf_fill_acquire_response(pdev,vf_info);
689             if (lm_status == LM_STATUS_SUCCESS)
690             {
691                 vf_info->pf_vf_response.response_size = sizeof(struct pf_vf_msg_acquire_resp);
692             }
693             break;
694         case PFVF_OP_INIT_VF:
695             resp_hdr->opcode_ver = PFVF_INIT_VF_VER;
696             if (vf_info->vf_si_state != PF_SI_ACQUIRED)
697             {
698                 resp_hdr->status = SW_PFVF_STATUS_FAILURE;
699                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
700                 break;
701             }
702             if (PFVF_INIT_VF_VER != requst_hdr->opcode_ver) {
703                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
704                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
705                 break;
706             }
707             lm_status = lm_pf_vf_fill_init_vf_response(pdev,vf_info);
708             break;
709         case PFVF_OP_SETUP_Q:
710             resp_hdr->opcode_ver = PFVF_SETUP_Q_VER;
711             if (vf_info->vf_si_state != PF_SI_VF_INITIALIZED) {
712                 resp_hdr->status = SW_PFVF_STATUS_FAILURE;
713                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
714                 break;
715             }
716             if (PFVF_SETUP_Q_VER != requst_hdr->opcode_ver) {
717                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
718                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
719                 break;
720             }
721             lm_status = lm_pf_vf_fill_setup_q_response(pdev,vf_info);
722             break;
723         case PFVF_OP_SET_Q_FILTERS:
724             resp_hdr->opcode_ver = PFVF_SET_Q_FILTERS_VER;
725             if (PFVF_SET_Q_FILTERS_VER != requst_hdr->opcode_ver) {
726                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
727                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
728                 break;
729             }
730             lm_status = lm_pf_vf_fill_set_q_filters_response(pdev,vf_info);
731             break;
732         case PFVF_OP_ACTIVATE_Q:
733             resp_hdr->opcode_ver = PFVF_ACTIVATE_Q_VER;
734             if (PFVF_ACTIVATE_Q_VER != requst_hdr->opcode_ver) {
735                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
736                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
737                 break;
738             }
739             break;
740         case PFVF_OP_DEACTIVATE_Q:
741             resp_hdr->opcode_ver = PFVF_DEACTIVATE_Q_VER;
742             if (PFVF_DEACTIVATE_Q_VER != requst_hdr->opcode_ver) {
743                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
744                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
745                 break;
746             }
747             break;
748         case PFVF_OP_TEARDOWN_Q:
749             resp_hdr->opcode_ver = PFVF_TEARDOWN_Q_VER;
750             if (vf_info->vf_si_state != PF_SI_VF_INITIALIZED) {
751                 resp_hdr->status = SW_PFVF_STATUS_FAILURE;
752                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
753                 break;
754             }
755             if (PFVF_TEARDOWN_Q_VER != requst_hdr->opcode_ver) {
756                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
757                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
758                 break;
759             }
760             lm_status = lm_pf_vf_fill_teardown_q_response(pdev,vf_info);
761             break;
762         case PFVF_OP_CLOSE_VF:
763             resp_hdr->opcode_ver = PFVF_CLOSE_VF_VER;
764             if (vf_info->vf_si_state != PF_SI_VF_INITIALIZED) {
765                 resp_hdr->status = SW_PFVF_STATUS_SUCCESS;
766                 DbgMessage(pdev, FATAL, "VF[%d] already closesd!\n",vf_info->relative_vf_id);
767                 break;
768             }
769             if (PFVF_CLOSE_VF_VER != requst_hdr->opcode_ver)
770             {
771                 resp_hdr->status                       = SW_PFVF_STATUS_MISMATCH_PF_VF_VERSION;
772                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
773                 break;
774             }
775             lm_status = lm_pf_vf_fill_close_vf_response(pdev,vf_info);
776             break;
777         case PFVF_OP_RELEASE_VF:
778             if (vf_info->vf_si_state != PF_SI_ACQUIRED) {
779                 resp_hdr->status = SW_PFVF_STATUS_FAILURE;
780                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
781                 break;
782             }
783             resp_hdr->opcode_ver = PFVF_RELEASE_VF_VER;
784             if (PFVF_RELEASE_VF_VER != requst_hdr->opcode_ver) {
785                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
786                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
787                 break;
788             }
789             lm_status = lm_pf_vf_fill_release_vf_response(pdev,vf_info);
790             break;
791         case PFVF_OP_UPDATE_RSS:
792             resp_hdr->opcode_ver = PFVF_UPDATE_RSS_VER;
793             if (PFVF_UPDATE_RSS_VER != requst_hdr->opcode_ver) {
794                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
795                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
796                 break;
797             }
798             lm_status = lm_pf_vf_fill_update_rss_response(pdev,vf_info);
799             break;
800         case PFVF_OP_UPDATE_RSC:
801             resp_hdr->opcode_ver = PFVF_UPDATE_RSC_VER;
802             if (PFVF_UPDATE_RSC_VER != requst_hdr->opcode_ver) {
803                 resp_hdr->status = SW_PFVF_STATUS_NOT_SUPPORTED;
804                 vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
805                 break;
806             }
807             lm_status = lm_pf_vf_fill_update_rsc_response(pdev,vf_info);
808             break;
809         default:
810             return LM_STATUS_FAILURE;
811         }
812     }
813     if (lm_status != LM_STATUS_PENDING)
814     {
815         vf_info->pf_vf_response.req_resp_state = VF_PF_RESPONSE_READY;
816     }
817     return lm_status;
818 }
819 
lm_pf_notify_standard_request_ready(struct _lm_device_t * pdev,lm_vf_info_t * vf_info,u8_t * set_done)820 lm_status_t lm_pf_notify_standard_request_ready(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t * set_done)
821 {
822     lm_status_t lm_status = LM_STATUS_SUCCESS;
823     struct vf_pf_msg_hdr * requst_hdr = vf_info->pf_vf_response.request_virt_addr;
824     struct pf_vf_msg_hdr * resp_hdr = vf_info->pf_vf_response.response_virt_addr;
825 
826     DbgBreakIf(!(pdev && IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) && vf_info && (vf_info->pf_vf_response.req_resp_state != VF_PF_REQUEST_IN_PROCESSING)));
827     DbgMessage(pdev, WARNvf, "lm_pf_process_standard_request\n");
828 
829 
830     switch (requst_hdr->opcode) {
831     case PFVF_OP_ACQUIRE:
832         DbgBreak();
833         break;
834     case PFVF_OP_INIT_VF:
835         DbgBreak();
836         break;
837     case PFVF_OP_SETUP_Q:
838         resp_hdr->opcode_ver = PFVF_SETUP_Q_VER;
839         if (vf_info->vf_si_state != PF_SI_VF_INITIALIZED) {
840             resp_hdr->status = SW_PFVF_STATUS_FAILURE;
841             DbgBreak();
842             break;
843         }
844         break;
845     case PFVF_OP_SET_Q_FILTERS:
846         break;
847     case PFVF_OP_ACTIVATE_Q:
848         break;
849     case PFVF_OP_DEACTIVATE_Q:
850         break;
851     case PFVF_OP_TEARDOWN_Q:
852         break;
853     case PFVF_OP_CLOSE_VF:
854         if (vf_info->vf_si_state != PF_SI_VF_INITIALIZED) {
855             resp_hdr->status = SW_PFVF_STATUS_FAILURE;
856             DbgBreak();
857             break;
858         }
859         break;
860     case PFVF_OP_RELEASE_VF:
861         if (vf_info->vf_si_state != PF_SI_ACQUIRED) {
862             resp_hdr->status = SW_PFVF_STATUS_FAILURE;
863             //return LM_STATUS_FAILURE;
864             DbgBreak();
865             break;
866         }
867         break;
868     default:
869         lm_status = LM_STATUS_FAILURE;
870         DbgBreak();
871         break;
872     }
873 
874 
875     return lm_status;
876 }
877 
lm_vf_pf_send_message_to_hw_channel(struct _lm_device_t * pdev,lm_vf_pf_message_t * mess)878 static lm_status_t lm_vf_pf_send_message_to_hw_channel(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
879 {
880     lm_address_t * message_phys_addr;
881 
882     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)));
883 
884     DbgMessage(pdev, WARNvf, "lm_vf_pf_channel_send\n");
885 
886     if (mess != NULL) {
887         message_phys_addr = &mess->message_phys_addr;
888     } else {
889         message_phys_addr = &pdev->vars.vf_pf_mess.message_phys_addr;
890     }
891 
892     VF_REG_WR(pdev, (VF_BAR0_CSDM_GLOBAL_OFFSET +
893                 OFFSETOF(struct cstorm_vf_zone_data,non_trigger)
894               + OFFSETOF(struct non_trigger_vf_zone,vf_pf_channel)
895               + OFFSETOF(struct vf_pf_channel_zone_data, msg_addr_lo)),
896                 message_phys_addr->as_u32.low);
897 
898     VF_REG_WR(pdev, (VF_BAR0_CSDM_GLOBAL_OFFSET +
899                 OFFSETOF(struct cstorm_vf_zone_data,non_trigger)
900               + OFFSETOF(struct non_trigger_vf_zone,vf_pf_channel)
901               + OFFSETOF(struct vf_pf_channel_zone_data, msg_addr_hi)),
902                 message_phys_addr->as_u32.high);
903 
904     LM_INTMEM_WRITE8(pdev,(OFFSETOF(struct cstorm_vf_zone_data,trigger)
905                         + OFFSETOF(struct trigger_vf_zone,vf_pf_channel)
906                         + OFFSETOF(struct vf_pf_channel_zone_trigger, addr_valid)),
907                      1,VF_BAR0_CSDM_GLOBAL_OFFSET);
908 
909 /*    VF_REG_WR(pdev, VF_BAR0_CSDM_GLOBAL_OFFSET +
910                 OFFSETOF(struct cstorm_function_zone_data,non_trigger)
911               + OFFSETOF(struct trigger_function_zone,vf_pf_channel)
912               + OFFSETOF(struct vf_pf_channel_zone_trigger, addr_valid),
913                 message_phys_addr.as_u32.low);*/
914 
915     return LM_STATUS_SUCCESS;
916 }
917 
lm_vf_pf_send_request_to_sw_channel(struct _lm_device_t * pdev,lm_vf_pf_message_t * mess)918 lm_status_t lm_vf_pf_send_request_to_sw_channel(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
919 {
920     lm_status_t     lm_status = LM_STATUS_SUCCESS;
921     struct vf_pf_msg_hdr *hdr = (struct vf_pf_msg_hdr*)pdev->vars.vf_pf_mess.message_virt_addr;
922     void *  buffer = mess->message_virt_addr;
923     u32_t   length = hdr->resp_msg_offset;
924 
925     lm_status = mm_vf_pf_write_block_to_sw_channel(pdev, VF_TO_PF_STANDARD_BLOCK_ID, buffer, length);
926     return lm_status;
927 }
928 
lm_vf_pf_recv_response_from_sw_channel(struct _lm_device_t * pdev,lm_vf_pf_message_t * mess)929 lm_status_t lm_vf_pf_recv_response_from_sw_channel(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
930 {
931     lm_status_t     lm_status = LM_STATUS_SUCCESS;
932     struct vf_pf_msg_hdr *hdr = (struct vf_pf_msg_hdr*)pdev->vars.vf_pf_mess.message_virt_addr;
933     void *  buffer = (u8_t*)mess->message_virt_addr + hdr->resp_msg_offset;
934     u32_t   length = 0;
935     u32_t   received_length;
936     u32_t   received_offset = 0;
937 
938     //mess->message_size - hdr->resp_msg_offset;
939     if (hdr->opcode == PFVF_OP_ACQUIRE) {
940         received_length = length = sizeof(struct pf_vf_msg_acquire_resp);
941     } else {
942         received_length = length = sizeof(struct pf_vf_msg_resp);
943     }
944     while (length) {
945         received_length = length;
946         lm_status = mm_vf_pf_read_block_from_sw_channel(pdev, VF_TO_PF_STANDARD_BLOCK_ID, (u8_t*)buffer + received_offset, &received_length);
947         if (lm_status != LM_STATUS_SUCCESS) {
948             break;
949         }
950         if (!received_offset) {
951             if (((struct pf_vf_msg_hdr*)buffer)->status != SW_PFVF_STATUS_SUCCESS) {
952                 break;
953             }
954         }
955         length -= received_length;
956         received_offset += received_length;
957     }
958 
959     return lm_status;
960 }
961 
lm_vf_pf_channel_send(struct _lm_device_t * pdev,lm_vf_pf_message_t * mess)962 static lm_status_t lm_vf_pf_channel_send(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
963 {
964     lm_status_t     lm_status = LM_STATUS_SUCCESS;
965 
966     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)));
967 
968     DbgMessage(pdev, WARNvf, "lm_vf_pf_channel_send\n");
969 
970     if (IS_HW_CHANNEL_VIRT_MODE(pdev)) {
971         lm_vf_pf_send_message_to_hw_channel(pdev, mess);
972     } else if (IS_SW_CHANNEL_VIRT_MODE(pdev)) {
973         lm_status = lm_vf_pf_send_request_to_sw_channel(pdev, mess);
974     } else {
975         DbgBreakMsg("lm_vf_pf_channel_send: UNKNOWN channel type\n");
976         return LM_STATUS_FAILURE;
977     }
978 
979 
980     if (!mess->do_not_arm_trigger && (lm_status == LM_STATUS_SUCCESS)) {
981         mm_vf_pf_arm_trigger(pdev, mess);
982     }
983 
984     return lm_status;
985 }
986 
lm_vf_pf_channel_wait_response(struct _lm_device_t * pdev,lm_vf_pf_message_t * mess)987 static lm_status_t lm_vf_pf_channel_wait_response(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
988 {
989     u32_t             delay_us = 0;
990     u32_t             sum_delay_us = 0;
991     u32_t             to_cnt   = 10000 + 2360; // We'll wait 10,000 times 100us (1 second) + 2360 times 25000us (59sec) = total 60 sec
992     lm_status_t       lm_status = LM_STATUS_SUCCESS;
993 
994     /* check args */
995     if ERR_IF(!(pdev && IS_CHANNEL_VFDEV(pdev) && mess && pdev->vars.vf_pf_mess.message_virt_addr)) {
996         DbgBreak();
997         return LM_STATUS_INVALID_PARAMETER;
998     }
999 
1000     /* wait for message done */
1001     DbgMessage(pdev, WARN, "lm_vf_pf_channel_wait_response\n");
1002     if (mess == NULL) {
1003         mess = &pdev->vars.vf_pf_mess;
1004     }
1005 
1006     if ((*mess->done == FALSE) && IS_SW_CHANNEL_VIRT_MODE(pdev) && !lm_reset_is_inprogress(pdev)) {
1007         lm_status = lm_vf_pf_recv_response_from_sw_channel(pdev, mess);
1008     }
1009 
1010     while ((lm_status == LM_STATUS_SUCCESS) && (*mess->done == FALSE) && to_cnt--)
1011     {
1012         delay_us = (to_cnt >= 2360) ? 100 : 25000 ;
1013         sum_delay_us += delay_us;
1014         mm_wait(pdev, delay_us);
1015 
1016         // in case reset in progress
1017         // we won't get completion so no need to wait
1018         if( lm_reset_is_inprogress(pdev) ) {
1019             break;
1020         } else if (IS_SW_CHANNEL_VIRT_MODE(pdev)) {
1021             lm_status = lm_vf_pf_recv_response_from_sw_channel(pdev,mess);
1022         }
1023     }
1024     if (*mess->done) {
1025         DbgMessage(pdev, WARN, "lm_vf_pf_channel_wait_response: message done(%dus waiting)\n",sum_delay_us);
1026     } else {
1027         switch (lm_status)
1028         {
1029         case LM_STATUS_REQUEST_NOT_ACCEPTED:
1030             break;
1031         case LM_STATUS_SUCCESS:
1032             lm_status = LM_STATUS_TIMEOUT;
1033         default:
1034             if (!lm_reset_is_inprogress(pdev))
1035             {
1036 #if defined(_VBD_)
1037                 DbgBreak();
1038 #endif
1039             }
1040             break;
1041         }
1042 	DbgMessage(pdev, FATAL, "lm_vf_pf_channel_wait_response returns %d\n", lm_status);
1043     }
1044     return lm_status;
1045 }
1046 
lm_vf_pf_channel_release_message(struct _lm_device_t * pdev,lm_vf_pf_message_t * mess)1047 static void lm_vf_pf_channel_release_message(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
1048 {
1049     if (mess->cookie) { //TODO don't indicate in case of error processing
1050         DbgMessage(pdev, WARN, "VF_PF channel: assuming REQ_SET_INFORMATION - indicating back to NDIS!\n");
1051         mm_set_done(pdev, LM_SW_LEADING_RSS_CID(pdev), mess->cookie);
1052         mess->cookie = NULL;
1053     }
1054     mm_atomic_dec(&mess->state);
1055 }
1056 
lm_vf_pf_channel_get_message_to_send(struct _lm_device_t * pdev,const u32_t opcode)1057 static lm_vf_pf_message_t * lm_vf_pf_channel_get_message_to_send(struct _lm_device_t * pdev, const u32_t  opcode)
1058 {
1059     u16_t resp_offset = 0;
1060     struct vf_pf_msg_hdr    *sw_hdr;
1061     struct pf_vf_msg_hdr    *sw_resp_hdr;
1062     struct vfpf_first_tlv   *hw_first_tlv;
1063     struct channel_list_end_tlv *hw_list_end_tlv;
1064     struct pfvf_tlv         *hw_resp_hdr;
1065 
1066     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)));
1067 
1068 #ifndef __LINUX
1069     if (mm_atomic_inc(&pdev->vars.vf_pf_mess.state) != 1) {
1070         DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->vars.vf_pf_mess.state is %d\n",pdev->vars.vf_pf_mess.state);
1071         mm_atomic_dec(&pdev->vars.vf_pf_mess.state);
1072 
1073         return NULL;
1074     }
1075 #else
1076     mm_atomic_inc(&pdev->vars.vf_pf_mess.state);
1077     DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->vars.vf_pf_mess.state is %d\n",pdev->vars.vf_pf_mess.state);
1078 #endif
1079     if (pdev->vars.vf_pf_mess.message_virt_addr == NULL) {
1080         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1081         {
1082             pdev->vars.vf_pf_mess.message_size = ((sizeof(union vf_pf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
1083                                                + ((sizeof(union pf_vf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK);
1084         }
1085         else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1086         {
1087             pdev->vars.vf_pf_mess.message_size = ((sizeof(union vfpf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
1088                                                + ((sizeof(union pfvf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
1089                                                + ((sizeof(union pf_vf_bulletin) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK);
1090         }
1091         else
1092         {
1093             DbgBreakMsg("lm_vf_pf_channel_get_message_to_send: UNKNOWN channel type\n");
1094             return NULL;
1095         }
1096         pdev->vars.vf_pf_mess.message_virt_addr = mm_alloc_phys_mem(pdev, pdev->vars.vf_pf_mess.message_size,
1097                                                                     &pdev->vars.vf_pf_mess.message_phys_addr, 0, LM_RESOURCE_COMMON);
1098         if CHK_NULL(pdev->vars.vf_pf_mess.message_virt_addr)
1099         {
1100             DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->vvars.vf_pf_mess.message_virt_addr is NULL\n");
1101             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1102             mm_atomic_dec(&pdev->vars.vf_pf_mess.state);
1103             return NULL;
1104         }
1105         if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1106         {
1107             u32_t buletin_offset;
1108 			buletin_offset = pdev->vars.vf_pf_mess.message_size =
1109                 ((sizeof(union vfpf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
1110                 + ((sizeof(union pfvf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK);
1111             pdev->vars.vf_pf_mess.bulletin_virt_addr = (u8_t*)pdev->vars.vf_pf_mess.message_virt_addr + buletin_offset;
1112             pdev->vars.vf_pf_mess.bulletin_phys_addr = pdev->vars.vf_pf_mess.message_phys_addr;
1113             LM_INC64(&pdev->vars.vf_pf_mess.bulletin_phys_addr, buletin_offset);
1114         }
1115     }
1116     mm_mem_zero(pdev->vars.vf_pf_mess.message_virt_addr, pdev->vars.vf_pf_mess.message_size);
1117     sw_hdr = (struct vf_pf_msg_hdr*)pdev->vars.vf_pf_mess.message_virt_addr;
1118     hw_first_tlv = (struct vfpf_first_tlv*)pdev->vars.vf_pf_mess.message_virt_addr;
1119     switch (opcode) {
1120     case PFVF_OP_ACQUIRE:
1121         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1122         {
1123             resp_offset = (sizeof(struct vf_pf_msg_acquire) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1124             sw_hdr->opcode_ver = PFVF_ACQUIRE_VER;
1125         }
1126         else
1127         {
1128             resp_offset = (sizeof(struct vfpf_acquire_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1129             hw_first_tlv->tl.type = CHANNEL_TLV_ACQUIRE;
1130             hw_first_tlv->tl.length = sizeof(struct vfpf_acquire_tlv);
1131         }
1132         break;
1133     case PFVF_OP_INIT_VF:
1134         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1135         {
1136             resp_offset = (sizeof(struct vf_pf_msg_init_vf) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1137             sw_hdr->opcode_ver = PFVF_INIT_VF_VER;
1138         }
1139         else
1140         {
1141             resp_offset = (sizeof(struct vfpf_init_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1142             hw_first_tlv->tl.type = CHANNEL_TLV_INIT;
1143             hw_first_tlv->tl.length = sizeof(struct vfpf_init_tlv);
1144         }
1145         break;
1146     case PFVF_OP_SETUP_Q:
1147         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1148         {
1149             resp_offset = (sizeof(struct vf_pf_msg_setup_q) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1150             sw_hdr->opcode_ver = PFVF_SETUP_Q_VER;
1151         }
1152         else
1153         {
1154             resp_offset = (sizeof(struct vfpf_setup_q_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1155             hw_first_tlv->tl.type = CHANNEL_TLV_SETUP_Q;
1156             hw_first_tlv->tl.length = sizeof(struct vfpf_setup_q_tlv);
1157         }
1158         break;
1159     case PFVF_OP_SET_Q_FILTERS:
1160         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1161         {
1162             sw_hdr->opcode_ver = PFVF_SET_Q_FILTERS_VER;
1163             resp_offset = (sizeof(struct vf_pf_msg_set_q_filters) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1164         }
1165         else
1166         {
1167             resp_offset = (sizeof(struct vfpf_set_q_filters_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1168             hw_first_tlv->tl.type = CHANNEL_TLV_SET_Q_FILTERS;
1169             hw_first_tlv->tl.length = sizeof(struct vfpf_set_q_filters_tlv);
1170         }
1171         break;
1172 #if 0
1173     case PFVF_OP_ACTIVATE_Q:
1174         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1175         {
1176             resp_offset = (sizeof(struct vf_pf_msg_q_op) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1177             sw_hdr->opcode_ver = PFVF_ACTIVATE_Q_VER;
1178         }
1179         else
1180         {
1181             DbgBreakMsg("lm_vf_pf_channel_get_message_to_send: HW_CHANNEL is not implemented yet\n");
1182             return NULL;
1183         }
1184         break;
1185     case PFVF_OP_DEACTIVATE_Q:
1186         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1187         {
1188             resp_offset = (sizeof(struct vf_pf_msg_q_op) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1189             sw_hdr->opcode_ver = PFVF_DEACTIVATE_Q_VER;
1190         }
1191         else
1192         {
1193             DbgBreakMsg("lm_vf_pf_channel_get_message_to_send: HW_CHANNEL is not implemented yet\n");
1194             return NULL;
1195         }
1196         break;
1197 #endif
1198     case PFVF_OP_TEARDOWN_Q:
1199         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1200         {
1201             resp_offset = (sizeof(struct vf_pf_msg_q_op) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1202             sw_hdr->opcode_ver = PFVF_TEARDOWN_Q_VER;
1203         }
1204         else
1205         {
1206             resp_offset = (sizeof(struct vfpf_q_op_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1207             hw_first_tlv->tl.type = CHANNEL_TLV_TEARDOWN_Q;
1208             hw_first_tlv->tl.length = sizeof(struct vfpf_q_op_tlv);
1209         }
1210         break;
1211     case PFVF_OP_CLOSE_VF:
1212         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1213         {
1214             resp_offset = (sizeof(struct vf_pf_msg_close_vf) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1215             sw_hdr->opcode_ver = PFVF_CLOSE_VF_VER;
1216         }
1217         else
1218         {
1219             resp_offset = (sizeof(struct vfpf_close_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1220             hw_first_tlv->tl.type = CHANNEL_TLV_CLOSE;
1221             hw_first_tlv->tl.length = sizeof(struct vfpf_close_tlv);
1222         }
1223         break;
1224     case PFVF_OP_RELEASE_VF:
1225         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1226         {
1227             resp_offset = (sizeof(struct vf_pf_msg_release_vf) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1228             sw_hdr->opcode_ver = PFVF_RELEASE_VF_VER;
1229         }
1230         else
1231         {
1232             resp_offset = (sizeof(struct vfpf_release_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1233             hw_first_tlv->tl.type = CHANNEL_TLV_RELEASE;
1234             hw_first_tlv->tl.length = sizeof(struct vfpf_release_tlv);
1235         }
1236         break;
1237     case PFVF_OP_UPDATE_RSS:
1238         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1239         {
1240             resp_offset = (sizeof(struct vf_pf_msg_rss) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1241             sw_hdr->opcode_ver = PFVF_UPDATE_RSS_VER;
1242         }
1243         else
1244         {
1245             resp_offset = (sizeof(struct vfpf_rss_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1246             hw_first_tlv->tl.type = CHANNEL_TLV_UPDATE_RSS;
1247             hw_first_tlv->tl.length = sizeof(struct vfpf_rss_tlv);
1248         }
1249         break;
1250     case PFVF_OP_UPDATE_RSC:
1251         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1252         {
1253             resp_offset = (sizeof(struct vf_pf_msg_rsc) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1254             sw_hdr->opcode_ver = PFVF_UPDATE_RSC_VER;
1255         }
1256         else
1257         {
1258             resp_offset = (sizeof(struct vfpf_tpa_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
1259             hw_first_tlv->tl.type = CHANNEL_TLV_UPDATE_TPA;
1260             hw_first_tlv->tl.length = sizeof(struct vfpf_tpa_tlv);
1261         }
1262         break;
1263     default:
1264         mm_atomic_dec(&pdev->vars.vf_pf_mess.state);
1265         DbgMessage(pdev, FATAL, "VF_PF channel: Opcode %d is not supported\n",opcode);
1266         DbgBreak();
1267         return NULL;
1268     }
1269     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1270     {
1271         sw_hdr->if_ver  = PFVF_IF_VERSION;
1272         sw_hdr->opcode = (u16_t)opcode;
1273         sw_hdr->resp_msg_offset = resp_offset;
1274         sw_resp_hdr = (struct pf_vf_msg_hdr *)((u8_t*)sw_hdr + resp_offset);
1275         sw_resp_hdr->status = SW_PFVF_STATUS_WAITING;
1276         pdev->vars.vf_pf_mess.done = (u16_t*)((u8_t *)pdev->vars.vf_pf_mess.message_virt_addr + resp_offset);
1277     }
1278     else
1279     {
1280         hw_list_end_tlv = (struct channel_list_end_tlv *)((u8_t*)hw_first_tlv + hw_first_tlv->tl.length);
1281         hw_list_end_tlv->tl.type = CHANNEL_TLV_LIST_END;
1282         hw_first_tlv->resp_msg_offset = resp_offset;
1283         hw_resp_hdr = (struct pfvf_tlv *)((u8_t*)hw_first_tlv + hw_first_tlv->resp_msg_offset);
1284         pdev->vars.vf_pf_mess.done = (u16_t*)(&hw_resp_hdr->status);
1285     }
1286     return &pdev->vars.vf_pf_mess;
1287 }
1288 
lm_vf_pf_get_sb_running_index(lm_device_t * pdev,u8_t sb_id,u8_t sm_idx)1289 u16_t lm_vf_pf_get_sb_running_index(lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
1290 {
1291     u16_t running_index = 0;
1292     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && pdev->pf_vf_acquiring_resp));
1293     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1294     {
1295         struct pf_vf_msg_acquire_resp * p_sw_resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
1296         running_index = pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[p_sw_resp->pfdev_info.indices_per_sb + sm_idx];
1297     }
1298     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1299     {
1300         struct pfvf_acquire_resp_tlv * p_hw_resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;;
1301         running_index = pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[p_hw_resp->pfdev_info.indices_per_sb + sm_idx];
1302     }
1303     else
1304     {
1305         DbgBreak();
1306     }
1307 
1308     return mm_le16_to_cpu(running_index);
1309 }
1310 
1311 
lm_vf_pf_get_sb_index(lm_device_t * pdev,u8_t sb_id,u8_t idx)1312 u16_t lm_vf_pf_get_sb_index(lm_device_t *pdev, u8_t sb_id, u8_t idx)
1313 {
1314     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && pdev->pf_vf_acquiring_resp));
1315     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1316     {
1317         struct pf_vf_msg_acquire_resp * p_sw_resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
1318         DbgBreakIf(!(p_sw_resp && (sb_id < p_sw_resp->pfdev_info.indices_per_sb)));
1319     }
1320     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1321     {
1322         struct pfvf_acquire_resp_tlv * p_hw_resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;;
1323         DbgBreakIf(!(p_hw_resp && (sb_id < p_hw_resp->pfdev_info.indices_per_sb)));
1324     }
1325     else
1326     {
1327         DbgBreak();
1328     }
1329     return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[sb_id]);
1330 }
1331 
lm_vf_get_doorbell_size(struct _lm_device_t * pdev)1332 u16_t lm_vf_get_doorbell_size(struct _lm_device_t *pdev)
1333 {
1334     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && pdev->pf_vf_acquiring_resp));
1335     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1336     {
1337         struct pf_vf_msg_acquire_resp * p_sw_resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
1338         DbgBreakIf(!p_sw_resp->pfdev_info.db_size);
1339         return p_sw_resp->pfdev_info.db_size;
1340     }
1341     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1342     {
1343         struct pfvf_acquire_resp_tlv * p_hw_resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;;
1344         DbgBreakIf(!p_hw_resp->pfdev_info.db_size);
1345         return p_hw_resp->pfdev_info.db_size;
1346     }
1347     else
1348     {
1349         DbgBreak();
1350     }
1351     return 0;
1352 }
1353 
lm_vf_pf_wait_no_messages_pending(struct _lm_device_t * pdev)1354 lm_status_t lm_vf_pf_wait_no_messages_pending(struct _lm_device_t * pdev)
1355 {
1356     lm_status_t lm_status = LM_STATUS_SUCCESS;
1357     lm_vf_pf_message_t * pf_mess = NULL;
1358     pf_mess = &pdev->vars.vf_pf_mess;
1359     lm_status = lm_vf_pf_channel_wait_response(pdev, pf_mess);
1360 
1361     DbgMessage(pdev, WARNvf, "lm_vf_pf_wait_no_messages_pending\n");
1362 
1363     if (lm_status == LM_STATUS_SUCCESS) {
1364         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1365         {
1366             struct vf_pf_msg_hdr * mess_hdr = NULL;
1367             struct pf_vf_msg_hdr * resp_hdr = NULL;
1368             mess_hdr = (struct vf_pf_msg_hdr *)pf_mess->message_virt_addr;
1369             resp_hdr = (struct pf_vf_msg_hdr *)((u8_t*)mess_hdr + mess_hdr->resp_msg_offset);
1370             switch (resp_hdr->status) {
1371             case SW_PFVF_STATUS_SUCCESS:
1372                 DbgMessage(pdev, WARN, "VF_PF Channel: Message %d(%d) is completed successfully\n",mess_hdr->opcode, resp_hdr->opcode);
1373                 lm_status = LM_STATUS_SUCCESS;
1374                 break;
1375             case SW_PFVF_STATUS_FAILURE:
1376             case SW_PFVF_STATUS_MISMATCH_PF_VF_VERSION:
1377             case SW_PFVF_STATUS_MISMATCH_FW_HSI:
1378             case SW_PFVF_STATUS_NO_RESOURCE:
1379                 DbgMessage(pdev, FATAL, "VF_PF Channel: Status %d is not supported yet\n", resp_hdr->status);
1380                 lm_status = LM_STATUS_FAILURE;
1381                 pf_mess->bad_response.sw_channel_hdr = *resp_hdr;
1382                 break;
1383             default:
1384                 DbgMessage(pdev, FATAL, "VF_PF Channel: Unknown status %d\n", resp_hdr->status);
1385                 pf_mess->bad_response.sw_channel_hdr = *resp_hdr;
1386                 lm_status = LM_STATUS_FAILURE;
1387                 break;
1388             }
1389         }
1390         else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1391         {
1392             struct vfpf_first_tlv * mess_hdr = NULL;
1393             struct pfvf_tlv * resp_hdr = NULL;
1394             mess_hdr = (struct vfpf_first_tlv *)pf_mess->message_virt_addr;
1395             resp_hdr = (struct pfvf_tlv *)((u8_t*)mess_hdr + mess_hdr->resp_msg_offset);
1396             switch (resp_hdr->status)
1397             {
1398             case PFVF_STATUS_SUCCESS:
1399                 lm_status = LM_STATUS_SUCCESS;
1400                 break;
1401             case PFVF_STATUS_FAILURE:
1402             case PFVF_STATUS_NOT_SUPPORTED:
1403             case PFVF_STATUS_NO_RESOURCE:
1404                 DbgMessage(pdev, FATAL, "VF_PF Channel: Status %d is not supported yet\n", resp_hdr->status);
1405                 pf_mess->bad_response.hw_channel_hdr = *resp_hdr;
1406                 lm_status = LM_STATUS_FAILURE;
1407                 break;
1408             default:
1409                 DbgMessage(pdev, FATAL, "VF_PF Channel: Unknown status %d\n", resp_hdr->status);
1410                 pf_mess->bad_response.hw_channel_hdr = *resp_hdr;
1411                 lm_status = LM_STATUS_FAILURE;
1412                 break;
1413             }
1414         }
1415         else
1416         {
1417             DbgBreak();
1418         }
1419     }
1420     lm_vf_pf_channel_release_message(pdev,pf_mess);
1421     return lm_status;
1422 }
1423 
lm_vf_pf_acquire_msg(struct _lm_device_t * pdev)1424 lm_status_t lm_vf_pf_acquire_msg(struct _lm_device_t * pdev)
1425 {
1426     lm_status_t lm_status = LM_STATUS_SUCCESS;
1427     lm_vf_pf_message_t * pf_mess = NULL;
1428     struct  vf_pf_msg_acquire * sw_mess = NULL;
1429     struct  vfpf_acquire_tlv  * hw_mess = NULL;
1430     struct  pf_vf_msg_acquire_resp * sw_resp = NULL;
1431     struct  pfvf_acquire_resp_tlv  * hw_resp = NULL;
1432     u8_t                           max_dq    = 0;
1433 
1434     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)));
1435 
1436     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_ACQUIRE);
1437 
1438     if (!pf_mess)
1439      {
1440         DbgMessage(pdev, FATAL, "VF_PF Channel: lm_vf_pf_channel_get_message_to_send returns NULL\n");
1441         lm_status = LM_STATUS_RESOURCE;
1442         DbgBreak();
1443         return lm_status;
1444     }
1445 
1446     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1447     {
1448         sw_mess = (struct vf_pf_msg_acquire*)pf_mess->message_virt_addr;
1449 
1450     //    mess->vfdev_info.vf_pf_msg_size = sizeof(union vf_pf_msg);
1451            /* the following fields are for debug purposes */
1452         sw_mess->vfdev_info.vf_id = ABS_VFID(pdev);       /* ME register value */
1453         sw_mess->vfdev_info.vf_os = 0;             /* e.g. Linux, W2K8 */
1454         sw_mess->vfdev_info.vf_aux             = SW_VFPF_VFDEF_INFO_AUX_DIRECT_DQ;
1455         sw_mess->vfdev_info.vf_fw_hsi_version  = pdev->ver_num_fw;  /* Must not be zero otherwise, VF will yellow bang */
1456         sw_mess->vfdev_info.fp_hsi_ver         = ETH_FP_HSI_VER_1; /* We don't want to break support for old/new VF/PF so we retrun v1 */
1457         DbgBreakIf( 0 == sw_mess->vfdev_info.vf_fw_hsi_version );
1458 
1459         sw_mess->resc_request.num_rxqs = sw_mess->resc_request.num_txqs = sw_mess->resc_request.num_sbs = LM_SB_CNT(pdev);
1460         sw_mess->resc_request.num_mac_filters = 1;
1461         sw_mess->resc_request.num_vlan_filters = 0;
1462         sw_mess->resc_request.num_mc_filters = 0;
1463 
1464     }
1465     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1466     {
1467         hw_mess = (struct vfpf_acquire_tlv*)pf_mess->message_virt_addr;
1468         hw_mess->vfdev_info.vf_id = ABS_VFID(pdev);       /* ME register value */
1469         hw_mess->vfdev_info.vf_os = 0;             /* e.g. Linux, W2K8 */
1470         hw_mess->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VER_1; /* We don't want to break support for old/new VF/PF so we retrun v1 */
1471         hw_mess->resc_request.num_rxqs = hw_mess->resc_request.num_txqs = hw_mess->resc_request.num_sbs = LM_SB_CNT(pdev);
1472         hw_mess->resc_request.num_mac_filters = 1;
1473         hw_mess->resc_request.num_vlan_filters = 0;
1474         hw_mess->resc_request.num_mc_filters = PFVF_MAX_MULTICAST_PER_VF;
1475         hw_mess->bulletin_addr = pf_mess->bulletin_phys_addr.as_u64;
1476     }
1477     else
1478     {
1479         DbgBreak();
1480         lm_vf_pf_channel_release_message(pdev,pf_mess);
1481         return LM_STATUS_FAILURE;
1482     }
1483 
1484     pf_mess->do_not_arm_trigger = TRUE;
1485     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
1486 
1487     if (lm_status != LM_STATUS_SUCCESS)
1488     {
1489         lm_vf_pf_channel_release_message(pdev,pf_mess);
1490         return lm_status;
1491     }
1492     lm_status = lm_vf_pf_channel_wait_response(pdev, pf_mess);
1493 
1494     // FIXME TODO
1495     if (lm_status == LM_STATUS_SUCCESS)
1496     {
1497         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1498         {
1499             sw_resp = (struct pf_vf_msg_acquire_resp *)((u8_t*)sw_mess + sw_mess->hdr.resp_msg_offset);
1500             if (sw_resp->hdr.opcode != PFVF_OP_ACQUIRE)
1501             {
1502                 lm_status = LM_STATUS_FAILURE;
1503             }
1504             else
1505             {
1506                 switch (sw_resp->hdr.status)
1507                 {
1508                 case SW_PFVF_STATUS_SUCCESS:
1509                     lm_status = LM_STATUS_SUCCESS;
1510                     break;
1511                 case SW_PFVF_STATUS_FAILURE:
1512                 case SW_PFVF_STATUS_MISMATCH_PF_VF_VERSION:
1513                 case SW_PFVF_STATUS_MISMATCH_FW_HSI:
1514                 case SW_PFVF_STATUS_NO_RESOURCE:
1515                     DbgMessage(pdev, FATAL, "VF_PF Channel: Status %d is not supported yet\n", sw_resp->hdr.status);
1516                     lm_status = LM_STATUS_FAILURE;
1517                     break;
1518                 default:
1519                     DbgMessage(pdev, FATAL, "VF_PF Channel: Unknown status %d\n", sw_resp->hdr.status);
1520                     lm_status = LM_STATUS_FAILURE;
1521                     break;
1522                 }
1523                 // We update here the status of pf_acquire
1524                 // in order to let the UM layer of the VF to report
1525                 // in the event log the relevant event log message
1526                 pdev->params.pf_acquire_status = sw_resp->hdr.status;
1527             }
1528         }
1529         else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1530         {
1531             hw_resp = (struct pfvf_acquire_resp_tlv *)((u8_t*)hw_mess + hw_mess->first_tlv.resp_msg_offset);
1532             if (hw_resp->hdr.tl.type != CHANNEL_TLV_ACQUIRE)
1533             {
1534                 lm_status = LM_STATUS_FAILURE;
1535             }
1536             else
1537             {
1538                 switch (hw_resp->hdr.status)
1539                 {
1540                 case PFVF_STATUS_SUCCESS:
1541                     lm_status = LM_STATUS_SUCCESS;
1542                     break;
1543                 case PFVF_STATUS_FAILURE:
1544                 case PFVF_STATUS_NOT_SUPPORTED:
1545                 case PFVF_STATUS_NO_RESOURCE:
1546                     DbgMessage(pdev, FATAL, "VF_PF Channel: Status %d is not supported yet\n", hw_resp->hdr.status);
1547                     lm_status = LM_STATUS_FAILURE;
1548                     break;
1549                 default:
1550                     DbgMessage(pdev, FATAL, "VF_PF Channel: Unknown status %d\n", hw_resp->hdr.status);
1551                     lm_status = LM_STATUS_FAILURE;
1552                     break;
1553                 }
1554                 // We update here the status of pf_acquire
1555                 // in order to let the UM layer of the VF to report
1556                 // in the event log the relevant event log message
1557                 pdev->params.pf_acquire_status = hw_resp->hdr.status;
1558             }
1559         }
1560         else
1561         {
1562             DbgBreak();
1563             lm_status = LM_STATUS_FAILURE;
1564         }
1565     }
1566 
1567     if (lm_status == LM_STATUS_SUCCESS)
1568     {
1569         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1570         {
1571             struct pf_vf_msg_acquire_resp * presp;
1572 
1573             if (pdev->pf_vf_acquiring_resp == NULL)
1574             {
1575                 pdev->pf_vf_acquiring_resp = mm_alloc_mem(pdev, sizeof(struct pf_vf_msg_acquire_resp),LM_RESOURCE_COMMON);
1576 
1577                 if CHK_NULL(pdev->pf_vf_acquiring_resp)
1578                 {
1579                     lm_vf_pf_channel_release_message(pdev, pf_mess);
1580                     DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1581                     return LM_STATUS_RESOURCE;
1582                 }
1583                 else
1584                 {
1585                     DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->pf_vf_acquiring_resp is allocated (%db)\n",sizeof(struct pf_vf_msg_acquire_resp));
1586                 }
1587             }
1588 
1589             // FIXME TODO
1590             presp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
1591 
1592             // override for now to make sure we get correct answer...
1593             presp->pfdev_info.chip_num = CHIP_NUM_5712E;
1594 
1595             mm_memcpy(pdev->pf_vf_acquiring_resp, sw_resp, sizeof(struct pf_vf_msg_acquire_resp));
1596             if (!pdev->params.debug_sriov)
1597             {
1598                 pdev->params.debug_sriov = presp->pfdev_info.pf_cap & PFVF_DEBUG;
1599 	    }
1600             DbgMessage(pdev, FATALvf, "presp->pfdev_info.db_size = %d\n", presp->pfdev_info.db_size);
1601             DbgMessage(pdev, FATALvf, "presp->pfdev_info.indices_per_sb = %d\n", presp->pfdev_info.indices_per_sb);
1602             DbgMessage(pdev, FATALvf, "presp->pfdev_info.pf_cap = %d\n", presp->pfdev_info.pf_cap);
1603             DbgMessage(pdev, FATALvf, "presp->pfdev_info.chip_num = %d\n", presp->pfdev_info.chip_num);
1604             DbgMessage(pdev, FATALvf, "presp->resc.hw_qid[0] = %d\n", presp->resc.hw_qid[0]);
1605             DbgMessage(pdev, FATALvf, "presp->resc.hw_sbs[0].hw_sb_id = %d\n", presp->resc.hw_sbs[0].hw_sb_id);
1606             DbgMessage(pdev, FATALvf, "presp->resc.hw_sbs[0].sb_qid = %d\n", presp->resc.hw_sbs[0].sb_qid);
1607             DbgMessage(pdev, FATALvf, "presp->resc.num_sbs = %d\n", presp->resc.num_sbs);
1608             DbgMessage(pdev, FATALvf, "presp->resc.igu_cnt = %d\n", presp->resc.igu_cnt);
1609             DbgMessage(pdev, FATALvf, "presp->resc.igu_test_cnt = %d\n", presp->resc.igu_test_cnt);
1610             DbgMessage(pdev, FATALvf, "presp->resc.num_rxqs = %d\n", presp->resc.num_rxqs);
1611             DbgMessage(pdev, FATALvf, "presp->resc.num_txqs = %d\n", presp->resc.num_txqs);
1612             DbgMessage(pdev, FATALvf, "presp->resc.num_mac_filters = %d\n", presp->resc.num_mac_filters);
1613             DbgMessage(pdev, FATALvf, "presp->resc.num_mc_filters = %d\n", presp->resc.num_mc_filters);
1614             DbgMessage(pdev, FATALvf, "presp->resc.num_vlan_filters = %d\n", presp->resc.num_vlan_filters);
1615 
1616             if (presp->pfdev_info.db_size)
1617             {
1618                 max_dq = VF_BAR0_DB_SIZE / presp->pfdev_info.db_size;
1619                 if (!max_dq)
1620                 {
1621                     max_dq = 1;
1622                 }
1623             }
1624             else
1625             {
1626                 lm_vf_pf_channel_release_message(pdev, pf_mess);
1627                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
1628                 return LM_STATUS_INVALID_PARAMETER;
1629             }
1630             pdev->params.fw_base_qzone_cnt = pdev->params.sb_cnt = min(presp->resc.num_sbs, max_dq);
1631             pdev->params.max_rss_chains = pdev->params.rss_chain_cnt = min(presp->resc.num_rxqs, max_dq);
1632             pdev->params.tss_chain_cnt = min(presp->resc.num_txqs, max_dq);
1633 
1634             pdev->hw_info.chip_id = presp->pfdev_info.chip_num;
1635             pdev->hw_info.intr_blk_info.blk_type = INTR_BLK_IGU;
1636             pdev->hw_info.intr_blk_info.blk_mode = INTR_BLK_MODE_NORM;
1637             pdev->hw_info.intr_blk_info.access_type = INTR_BLK_ACCESS_IGUMEM;
1638 
1639             /* IGU specific data */
1640             pdev->hw_info.intr_blk_info.igu_info.igu_base_sb = presp->resc.hw_sbs[0].hw_sb_id;
1641             pdev->hw_info.intr_blk_info.igu_info.igu_sb_cnt = presp->resc.igu_cnt;
1642             pdev->hw_info.intr_blk_info.igu_info.igu_test_sb_cnt = presp->resc.igu_test_cnt;
1643             /* TODO: don't assume consecutiveness... */
1644             {
1645                 u8_t idx;
1646                 for (idx = 0; idx < pdev->params.fw_base_qzone_cnt; idx++)
1647                 {
1648                     pdev->params.fw_qzone_id[idx] = presp->resc.hw_qid[idx];
1649                     IGU_VF_NDSB(pdev,idx) = presp->resc.hw_sbs[idx].hw_sb_id;
1650                 }
1651             }
1652 
1653 
1654             /* TODO: get this from presp... here for purpose of rx_mask... */
1655             //pdev->hw_info.chip_id |= CHIP_REV_EMUL;
1656             if (presp->resc.num_mc_filters == 0xFF)
1657             {
1658                 presp->resc.num_mc_filters = 0;
1659             }
1660             if (presp->resc.current_mac_addr[0]
1661                     || presp->resc.current_mac_addr[1]
1662                     || presp->resc.current_mac_addr[2]
1663                     || presp->resc.current_mac_addr[3]
1664                     || presp->resc.current_mac_addr[4]
1665                     || presp->resc.current_mac_addr[5])
1666             {
1667                 DbgMessage(pdev, WARN, "VF received MAC from PF\n");
1668                 pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0] = presp->resc.current_mac_addr[0];
1669                 pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1] = presp->resc.current_mac_addr[1];
1670                 pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2] = presp->resc.current_mac_addr[2];
1671                 pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3] = presp->resc.current_mac_addr[3];
1672                 pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4] = presp->resc.current_mac_addr[4];
1673                 pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5] = presp->resc.current_mac_addr[5];
1674             }
1675             else
1676             {
1677                 DbgMessage(pdev, WARN, "VF uses own MAC\n");
1678             }
1679         }
1680         else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1681         {
1682             struct pfvf_acquire_resp_tlv * presp;
1683 
1684             if (pdev->pf_vf_acquiring_resp == NULL)
1685             {
1686                 pdev->pf_vf_acquiring_resp = mm_alloc_mem(pdev, sizeof(struct pfvf_acquire_resp_tlv),LM_RESOURCE_COMMON);
1687 
1688                 if CHK_NULL(pdev->pf_vf_acquiring_resp)
1689                 {
1690                     lm_vf_pf_channel_release_message(pdev, pf_mess);
1691                     DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1692                     return LM_STATUS_RESOURCE;
1693                 }
1694                 else
1695                 {
1696                     DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->pf_vf_acquiring_resp is allocated (%db)\n",sizeof(struct pfvf_acquire_resp_tlv));
1697                 }
1698             }
1699 
1700             // FIXME TODO
1701             presp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;
1702 
1703             presp->pfdev_info.chip_num = CHIP_NUM_5712E;
1704 
1705             mm_memcpy(pdev->pf_vf_acquiring_resp, hw_resp, sizeof(struct pfvf_acquire_resp_tlv));
1706 
1707             DbgMessage(pdev, FATALvf, "presp->pfdev_info.db_size = %d\n", presp->pfdev_info.db_size);
1708             DbgMessage(pdev, FATALvf, "presp->pfdev_info.indices_per_sb = %d\n", presp->pfdev_info.indices_per_sb);
1709             DbgMessage(pdev, FATALvf, "presp->pfdev_info.pf_cap = %d\n", presp->pfdev_info.pf_cap);
1710             DbgMessage(pdev, FATALvf, "presp->pfdev_info.chip_num = %d\n", presp->pfdev_info.chip_num);
1711             DbgMessage(pdev, FATALvf, "presp->resc.hw_qid[0] = %d\n", presp->resc.hw_qid[0]);
1712             DbgMessage(pdev, FATALvf, "presp->resc.hw_sbs[0].hw_sb_id = %d\n", presp->resc.hw_sbs[0].hw_sb_id);
1713             DbgMessage(pdev, FATALvf, "presp->resc.hw_sbs[0].sb_qid = %d\n", presp->resc.hw_sbs[0].sb_qid);
1714             DbgMessage(pdev, FATALvf, "presp->resc.num_sbs = %d\n", presp->resc.num_sbs);
1715             DbgMessage(pdev, FATALvf, "presp->resc.num_rxqs = %d\n", presp->resc.num_rxqs);
1716             DbgMessage(pdev, FATALvf, "presp->resc.num_txqs = %d\n", presp->resc.num_txqs);
1717             DbgMessage(pdev, FATALvf, "presp->resc.num_mac_filters = %d\n", presp->resc.num_mac_filters);
1718             DbgMessage(pdev, FATALvf, "presp->resc.num_mc_filters = %d\n", presp->resc.num_mc_filters);
1719             DbgMessage(pdev, FATALvf, "presp->resc.num_vlan_filters = %d\n", presp->resc.num_vlan_filters);
1720 
1721 
1722             if (presp->pfdev_info.db_size)
1723             {
1724                 max_dq = VF_BAR0_DB_SIZE / presp->pfdev_info.db_size;
1725                 if (!max_dq)
1726                 {
1727                     max_dq = 1;
1728                 }
1729             }
1730             else
1731             {
1732                 lm_vf_pf_channel_release_message(pdev, pf_mess);
1733                 DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
1734                 return LM_STATUS_INVALID_PARAMETER;
1735             }
1736             pdev->params.fw_base_qzone_cnt = pdev->params.sb_cnt = min(presp->resc.num_sbs, max_dq);
1737             pdev->params.max_rss_chains = pdev->params.rss_chain_cnt = min(presp->resc.num_rxqs, max_dq);
1738             pdev->params.tss_chain_cnt = min(presp->resc.num_txqs, max_dq);
1739 
1740             pdev->hw_info.chip_id = presp->pfdev_info.chip_num;
1741             pdev->hw_info.intr_blk_info.blk_type = INTR_BLK_IGU;
1742             pdev->hw_info.intr_blk_info.blk_mode = INTR_BLK_MODE_NORM;
1743             pdev->hw_info.intr_blk_info.access_type = INTR_BLK_ACCESS_IGUMEM;
1744 
1745             /* IGU specific data */
1746             pdev->hw_info.intr_blk_info.igu_info.igu_base_sb = presp->resc.hw_sbs[0].hw_sb_id;
1747             pdev->hw_info.intr_blk_info.igu_info.igu_sb_cnt = presp->resc.num_sbs;
1748             /* TODO: don't assume consecutiveness... */
1749             {
1750                 u8_t idx;
1751                 for (idx = 0; idx < pdev->params.fw_base_qzone_cnt; idx++)
1752                 {
1753                     pdev->params.fw_qzone_id[idx] = presp->resc.hw_qid[idx];
1754                     IGU_VF_NDSB(pdev,idx) = presp->resc.hw_sbs[idx].hw_sb_id;
1755                 }
1756             }
1757 
1758 
1759             /* TODO: get this from presp... here for purpose of rx_mask... */
1760             //pdev->hw_info.chip_id |= CHIP_REV_EMUL;
1761             if (presp->resc.num_mc_filters == 0xFF)
1762             {
1763                 presp->resc.num_mc_filters = 0;
1764             }
1765 			else if (presp->resc.num_mc_filters == 0)
1766 			{
1767 				presp->resc.num_mc_filters = hw_mess->resc_request.num_mc_filters;
1768 			}
1769 			pdev->params.mc_table_size[LM_CLI_IDX_NDIS] = presp->resc.num_mc_filters;
1770 			pdev->vars.pf_link_speed = presp->resc.pf_link_speed;
1771 
1772             if (presp->resc.current_mac_addr[0]
1773                     || presp->resc.current_mac_addr[1]
1774                     || presp->resc.current_mac_addr[2]
1775                     || presp->resc.current_mac_addr[3]
1776                     || presp->resc.current_mac_addr[4]
1777                     || presp->resc.current_mac_addr[5])
1778             {
1779 
1780                 DbgMessage(pdev, WARN, "VF received MAC from PF\n");
1781                 pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0] = presp->resc.current_mac_addr[0];
1782                 pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1] = presp->resc.current_mac_addr[1];
1783                 pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2] = presp->resc.current_mac_addr[2];
1784                 pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3] = presp->resc.current_mac_addr[3];
1785                 pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4] = presp->resc.current_mac_addr[4];
1786                 pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5] = presp->resc.current_mac_addr[5];
1787                 pdev->vars.is_pf_provides_mac = TRUE;
1788                 pdev->vars.is_pf_restricts_lamac = lm_vf_check_mac_restriction(pdev, presp);
1789             }
1790             else
1791             {
1792                 DbgMessage(pdev, WARN, "VF uses own MAC\n");
1793                 pdev->vars.is_pf_provides_mac = FALSE;
1794                 pdev->vars.is_pf_restricts_lamac = FALSE;
1795             }
1796         }
1797         else
1798         {
1799             DbgBreak();
1800             lm_status = LM_STATUS_FAILURE;
1801         }
1802     }
1803 
1804     lm_vf_pf_channel_release_message(pdev, pf_mess);
1805     return lm_status;
1806 }
1807 
lm_vf_pf_init_vf(struct _lm_device_t * pdev)1808 lm_status_t lm_vf_pf_init_vf(struct _lm_device_t * pdev)
1809 {
1810     lm_status_t                 lm_status = LM_STATUS_SUCCESS;
1811     lm_vf_pf_message_t *        pf_mess = NULL;
1812    lm_address_t                q_stats;
1813     u8_t                        sb_id;
1814 
1815     DbgMessage(pdev, WARNvf, "lm_vf_pf_init_vf\n");
1816 
1817     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && (LM_SB_CNT(pdev) <= PFVF_MAX_SBS_PER_VF)));
1818     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_INIT_VF);
1819     if (!pf_mess) {
1820         lm_status = LM_STATUS_RESOURCE;
1821         DbgBreak();
1822         return lm_status;
1823     }
1824     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1825     {
1826         struct vf_pf_msg_init_vf *  mess = NULL;
1827     mess = (struct vf_pf_msg_init_vf*)pf_mess->message_virt_addr;
1828 
1829     q_stats = pdev->vars.stats.stats_collect.stats_fw.fw_stats_data_mapping;
1830     LM_INC64(&q_stats, OFFSETOF(lm_stats_fw_stats_data_t, queue_stats));
1831     mess->stats_addr = q_stats.as_u64;
1832 
1833     LM_FOREACH_SB_ID(pdev,sb_id) {
1834         mess->sb_addr[sb_id] = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u64;
1835     }
1836     }
1837     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1838     {
1839         struct vfpf_init_tlv *  mess = NULL;
1840         mess = (struct vfpf_init_tlv*)pf_mess->message_virt_addr;
1841 
1842         q_stats = pdev->vars.stats.stats_collect.stats_fw.fw_stats_data_mapping;
1843         LM_INC64(&q_stats, OFFSETOF(lm_stats_fw_stats_data_t, queue_stats));
1844         mess->stats_addr = q_stats.as_u64;
1845         mess->flags = VFPF_INIT_FLG_STATS_COALESCE;
1846 
1847         LM_FOREACH_SB_ID(pdev,sb_id) {
1848             mess->sb_addr[sb_id] = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u64;
1849         }
1850     }
1851     else
1852     {
1853         DbgBreak();
1854         lm_vf_pf_channel_release_message(pdev,pf_mess);
1855         return LM_STATUS_FAILURE;
1856     }
1857 
1858     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
1859     if (lm_status != LM_STATUS_SUCCESS) {
1860         lm_vf_pf_channel_release_message(pdev,pf_mess);
1861     }
1862     DbgMessage(pdev, WARNvf, "lm_vf_pf_init_vf return lm_status = %d\n", lm_status);
1863 
1864     return lm_status;
1865 }
1866 
lm_vf_pf_setup_q(struct _lm_device_t * pdev,u8 vf_qid,u8_t validation_flag)1867 lm_status_t lm_vf_pf_setup_q(struct _lm_device_t * pdev, u8 vf_qid, u8_t validation_flag)
1868 {
1869     lm_status_t lm_status = LM_STATUS_SUCCESS;
1870     lm_vf_pf_message_t * pf_mess = NULL;
1871 
1872     DbgMessage(pdev, WARNvf, "lm_vf_pf_setup_q\n");
1873 
1874     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)
1875                  && (validation_flag & (RX_Q_VALIDATE | TX_Q_VALIDATE))
1876                  && (vf_qid < LM_SB_CNT(pdev))
1877                  && pdev->pf_vf_acquiring_resp));
1878 
1879     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_SETUP_Q);
1880     if (!pf_mess) {
1881         lm_status = LM_STATUS_RESOURCE;
1882         DbgBreak();
1883         return lm_status;
1884     }
1885     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
1886     {
1887         struct vf_pf_msg_setup_q * mess = NULL;
1888         struct pf_vf_msg_acquire_resp * presp = NULL;
1889 
1890     mess = (struct vf_pf_msg_setup_q*)pf_mess->message_virt_addr;
1891     presp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
1892     mess->vf_qid = vf_qid;
1893     if (validation_flag & RX_Q_VALIDATE) {
1894         SET_FLAGS(mess->param_valid, VFPF_RXQ_VALID);
1895         mess->rxq.rcq_addr = lm_bd_chain_phys_addr(&(LM_RCQ(pdev,vf_qid).bd_chain), 0).as_u64;
1896         mess->rxq.rcq_np_addr = lm_bd_chain_phys_addr(&(LM_RCQ(pdev,vf_qid).bd_chain), 1).as_u64;
1897         mess->rxq.rxq_addr = lm_bd_chain_phys_addr(&(LM_RXQ_CHAIN(pdev,vf_qid,0)), 0).as_u64;
1898         if (presp->pfdev_info.pf_cap & PFVF_CAP_TPA) {
1899             mess->rxq.sge_addr = LM_TPA_CHAIN_BD(pdev, vf_qid).bd_chain_phy.as_u64;
1900             if (mess->rxq.sge_addr) {
1901                     mess->rxq.flags |= SW_VFPF_QUEUE_FLG_TPA;
1902                 }
1903             } else {
1904                 mess->rxq.sge_addr = 0;
1905             }
1906 
1907             /* sb + hc info */
1908             mess->rxq.vf_sb = vf_qid;          /* relative to vf */
1909             mess->rxq.flags |= SW_VFPF_QUEUE_FLG_CACHE_ALIGN;
1910             mess->rxq.sb_index = LM_RCQ(pdev, vf_qid).hc_sb_info.hc_index_value;
1911             if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && !pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
1912                 mess->rxq.hc_rate = (u16_t)pdev->params.int_per_sec_rx[HC_PARAMS_ETH_INDEX];           /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
1913                 mess->rxq.flags |= SW_VFPF_QUEUE_FLG_HC;
1914                 if (pdev->params.enable_dynamic_hc[HC_PARAMS_ETH_INDEX] && (presp->pfdev_info.pf_cap & PFVF_CAP_DHC)) {
1915                     mess->rxq.flags |= SW_VFPF_QUEUE_FLG_DHC;
1916                 }
1917             }
1918 
1919             /* rx buffer info */
1920             mess->rxq.mtu        = (u16_t)pdev->params.l2_cli_con_params[vf_qid].mtu;
1921             mess->rxq.buf_sz     = MAX_L2_CLI_BUFFER_SIZE(pdev, vf_qid);
1922             mess->rxq.drop_flags = 0; //(u8_t)pdev->params.rx_err_filter;
1923         }
1924 
1925         if (validation_flag & TX_Q_VALIDATE) {
1926             SET_FLAGS(mess->param_valid, VFPF_TXQ_VALID);
1927             mess->txq.txq_addr = lm_bd_chain_phys_addr(&(LM_TXQ(pdev,vf_qid).bd_chain), 0).as_u64;
1928             mess->txq.vf_sb = vf_qid;
1929             mess->txq.sb_index = LM_TXQ(pdev, vf_qid).hc_sb_info.hc_index_value;
1930             if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
1931                 mess->txq.hc_rate = (u16_t)pdev->params.int_per_sec_tx[HC_PARAMS_ETH_INDEX];           /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
1932                 mess->txq.flags |= SW_VFPF_QUEUE_FLG_HC;
1933             }
1934         }
1935     }
1936     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
1937     {
1938         struct vfpf_setup_q_tlv * mess = NULL;
1939         struct pfvf_acquire_resp_tlv * presp = NULL;
1940 
1941         mess = (struct vfpf_setup_q_tlv*)pf_mess->message_virt_addr;
1942         presp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;
1943         mess->vf_qid = vf_qid;
1944         if (validation_flag & RX_Q_VALIDATE) {
1945             SET_FLAGS(mess->param_valid, VFPF_RXQ_VALID);
1946             mess->rxq.rcq_addr = lm_bd_chain_phys_addr(&(LM_RCQ(pdev,vf_qid).bd_chain), 0).as_u64;
1947             mess->rxq.rcq_np_addr = lm_bd_chain_phys_addr(&(LM_RCQ(pdev,vf_qid).bd_chain), 1).as_u64;
1948             mess->rxq.rxq_addr = lm_bd_chain_phys_addr(&(LM_RXQ_CHAIN(pdev,vf_qid,0)), 0).as_u64;
1949 #if 0
1950             if (presp->pfdev_info.pf_cap & PFVF_CAP_TPA) {
1951                 mess->rxq.sge_addr = LM_TPA_CHAIN_BD(pdev, vf_qid).bd_chain_phy.as_u64;
1952                 if (mess->rxq.sge_addr) {
1953                 mess->rxq.flags |= VFPF_QUEUE_FLG_TPA;
1954             }
1955             }
1956             else
1957 #endif
1958             {
1959         mess->rxq.sge_addr = 0;
1960         }
1961 
1962         /* sb + hc info */
1963         mess->rxq.vf_sb = vf_qid;          /* relative to vf */
1964         mess->rxq.flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
1965         mess->rxq.flags |= VFPF_QUEUE_FLG_STATS;
1966         mess->rxq.flags |= VFPF_QUEUE_FLG_VLAN;
1967         mess->rxq.sb_index = LM_RCQ(pdev, vf_qid).hc_sb_info.hc_index_value;
1968         if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && !pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
1969             mess->rxq.hc_rate = (u16_t)pdev->params.int_per_sec_rx[HC_PARAMS_ETH_INDEX];           /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
1970             mess->rxq.flags |= VFPF_QUEUE_FLG_HC;
1971             if (pdev->params.enable_dynamic_hc[HC_PARAMS_ETH_INDEX] && (presp->pfdev_info.pf_cap & PFVF_CAP_DHC)) {
1972                 mess->rxq.flags |= VFPF_QUEUE_FLG_DHC;
1973             }
1974         }
1975         if (!vf_qid)
1976         {
1977             mess->rxq.flags |= VFPF_QUEUE_FLG_LEADING_RSS;
1978         }
1979         /* rx buffer info */
1980         mess->rxq.mtu        = (u16_t)pdev->params.l2_cli_con_params[vf_qid].mtu;
1981         mess->rxq.buf_sz     = MAX_L2_CLI_BUFFER_SIZE(pdev, vf_qid);
1982         mess->rxq.drop_flags = 0; //(u8_t)pdev->params.rx_err_filter;
1983     }
1984 
1985     if (validation_flag & TX_Q_VALIDATE) {
1986         SET_FLAGS(mess->param_valid, VFPF_TXQ_VALID);
1987         mess->txq.txq_addr = lm_bd_chain_phys_addr(&(LM_TXQ(pdev,vf_qid).bd_chain), 0).as_u64;
1988         mess->txq.vf_sb = vf_qid;
1989         mess->txq.sb_index = LM_TXQ(pdev, vf_qid).hc_sb_info.hc_index_value;
1990         if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
1991             mess->txq.hc_rate = (u16_t)pdev->params.int_per_sec_tx[HC_PARAMS_ETH_INDEX];           /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
1992             mess->txq.flags |= VFPF_QUEUE_FLG_HC;
1993         }
1994     }
1995     }
1996     else
1997     {
1998         DbgBreak();
1999         lm_vf_pf_channel_release_message(pdev,pf_mess);
2000         return LM_STATUS_FAILURE;
2001     }
2002     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2003     if (lm_status != LM_STATUS_SUCCESS) {
2004         lm_vf_pf_channel_release_message(pdev,pf_mess);
2005     }
2006 
2007     DbgMessage(pdev, WARNvf, "lm_vf_pf_setup_q lm_status = %d\n", lm_status);
2008     return lm_status;
2009 }
2010 
2011 
2012 
lm_vf_pf_tear_q_down(struct _lm_device_t * pdev,u8 vf_qid)2013 lm_status_t lm_vf_pf_tear_q_down(struct _lm_device_t * pdev, u8 vf_qid)
2014 {
2015     lm_status_t lm_status = LM_STATUS_SUCCESS;
2016     lm_vf_pf_message_t * pf_mess = NULL;
2017 
2018     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)
2019                  && (vf_qid < LM_SB_CNT(pdev))));
2020 
2021     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_TEARDOWN_Q);
2022     if (!pf_mess) {
2023         lm_status = LM_STATUS_RESOURCE;
2024         DbgBreak();
2025         return lm_status;
2026     }
2027     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2028     {
2029         struct vf_pf_msg_q_op * mess = NULL;
2030     mess = (struct vf_pf_msg_q_op*)pf_mess->message_virt_addr;
2031     mess->vf_qid = vf_qid;
2032     }
2033     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2034     {
2035         struct vfpf_q_op_tlv * mess = NULL;
2036         mess = (struct vfpf_q_op_tlv*)pf_mess->message_virt_addr;
2037         mess->vf_qid = vf_qid;
2038     }
2039     else
2040     {
2041         DbgBreak();
2042         lm_vf_pf_channel_release_message(pdev,pf_mess);
2043         return LM_STATUS_FAILURE;
2044     }
2045     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2046     if (lm_status != LM_STATUS_SUCCESS) {
2047         lm_vf_pf_channel_release_message(pdev,pf_mess);
2048     }
2049 
2050     return lm_status;
2051 }
2052 
lm_vf_pf_set_q_filters(struct _lm_device_t * pdev,u8 vf_qid,void * cookie,q_filter_type filter_type,u8_t * pbuf,u32_t buf_len,u16_t vlan_tag,u8_t set_mac)2053 lm_status_t lm_vf_pf_set_q_filters(struct _lm_device_t * pdev, u8 vf_qid, void * cookie, q_filter_type filter_type, u8_t * pbuf, u32_t buf_len,
2054                                    u16_t vlan_tag, u8_t set_mac)
2055 {
2056     lm_status_t lm_status = LM_STATUS_SUCCESS;
2057     lm_vf_pf_message_t * pf_mess = NULL;
2058     u8_t    num_entries, idx_entries;
2059     u8_t    is_clear;
2060     lm_rx_mask_t * rx_mask;
2061     u8_t    send_it = FALSE;
2062 
2063     DbgMessage(pdev, WARNvf, "lm_vf_pf_set_q_filters\n");
2064 
2065     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && (vf_qid < LM_SB_CNT(pdev)) && pdev->pf_vf_acquiring_resp));
2066 
2067     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_SET_Q_FILTERS);
2068     if (!pf_mess) {
2069         lm_status = LM_STATUS_RESOURCE;
2070         DbgBreak();
2071         return lm_status;
2072     }
2073     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2074     {
2075         struct vf_pf_msg_set_q_filters * mess = NULL;
2076         struct pf_vf_msg_acquire_resp * resp = NULL;
2077     mess = (struct vf_pf_msg_set_q_filters*)pf_mess->message_virt_addr;
2078     resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
2079     mess->vf_qid = vf_qid;
2080     pf_mess->cookie = cookie;
2081     is_clear = ((pbuf == NULL) || (buf_len == 0));
2082 
2083     switch (filter_type) {
2084     case Q_FILTER_MAC:
2085         num_entries = resp->resc.num_mac_filters;
2086         is_clear = !set_mac;
2087         if (!is_clear) {
2088             num_entries = min((u32_t)num_entries, buf_len/ETHERNET_ADDRESS_SIZE);
2089         }
2090         mess->n_mac_vlan_filters = num_entries;
2091         for (idx_entries = 0; idx_entries < num_entries; idx_entries++) {
2092             mess->filters[idx_entries].flags = VFPF_Q_FILTER_DEST_MAC_PRESENT;
2093             if (is_clear) {
2094                 mess->filters[idx_entries].flags &= ~VFPF_Q_FILTER_SET_MAC;
2095             } else {
2096                 mess->filters[idx_entries].flags |= VFPF_Q_FILTER_SET_MAC;
2097             }
2098             mm_memcpy(mess->filters[idx_entries].dest_mac, pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
2099             if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER) {
2100                 mess->filters[idx_entries].vlan_tag = vlan_tag;
2101                 mess->filters[idx_entries].flags |= VFPF_Q_FILTER_VLAN_TAG_PRESENT;
2102             }
2103         }
2104         if (mess->n_mac_vlan_filters) {
2105             mess->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
2106         }
2107         break;
2108     case Q_FILTER_VLAN:
2109         DbgMessage(pdev, FATAL, "VLAN filter is not supported yet\n");
2110         DbgBreak();
2111         break;
2112     case Q_FILTER_MC:
2113         num_entries = resp->resc.num_mc_filters;
2114         if (!is_clear) {
2115             num_entries = min((u32_t)num_entries, buf_len/ETHERNET_ADDRESS_SIZE);
2116         }
2117         DbgMessage(pdev, FATAL, "Q_FILTER_MC: %d entries\n", num_entries);
2118         mess->n_multicast = num_entries;
2119         for (idx_entries = 0; idx_entries < num_entries; idx_entries++) {
2120             if (is_clear) {
2121                 mm_mem_zero(&mess->multicast[idx_entries][0], ETHERNET_ADDRESS_SIZE);
2122             } else {
2123                 mm_memcpy(&mess->multicast[idx_entries][0], pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
2124             }
2125         }
2126         if (mess->n_multicast) {
2127             mess->flags = VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
2128         }
2129         break;
2130     case Q_FILTER_RX_MASK:
2131         DbgBreakIf(is_clear || (buf_len != sizeof(lm_rx_mask_t)));
2132         mess->rx_mask = 0;
2133         rx_mask = (lm_rx_mask_t*)pbuf;
2134         if (GET_FLAGS(*rx_mask, LM_RX_MASK_PROMISCUOUS_MODE)) {
2135             mess->rx_mask |= (VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST | VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST |
2136                                VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST);
2137         }
2138         if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_UNICAST)) {
2139             mess->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
2140         }
2141         if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_MULTICAST)) {
2142             mess->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
2143         }
2144         if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_ALL_MULTICAST)) {
2145             mess->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
2146         }
2147         if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_BROADCAST)) {
2148             mess->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
2149         }
2150         mess->flags = VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
2151 
2152         DbgMessage(pdev, FATAL, "Q_FILTER_RX_MASK: mess->rx_mask=%x mess->flags=%x\n", mess->rx_mask, mess->flags);
2153         break;
2154     default:
2155         break;
2156     }
2157         if (mess->flags)
2158         {
2159             send_it = TRUE;
2160         }
2161     }
2162     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2163     {
2164         struct vfpf_set_q_filters_tlv * mess = NULL;
2165         struct pfvf_acquire_resp_tlv *  resp = NULL;
2166         mess = (struct vfpf_set_q_filters_tlv*)pf_mess->message_virt_addr;
2167         resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;
2168         mess->vf_qid = vf_qid;
2169         pf_mess->cookie = cookie;
2170         is_clear = ((pbuf == NULL) || (buf_len == 0));
2171 
2172         switch (filter_type) {
2173         case Q_FILTER_MAC:
2174             num_entries = resp->resc.num_mac_filters;
2175             is_clear = !set_mac;
2176             if (!is_clear) {
2177                 num_entries = min((u32_t)num_entries, buf_len/ETHERNET_ADDRESS_SIZE);
2178             }
2179             mess->n_mac_vlan_filters = num_entries;
2180             for (idx_entries = 0; idx_entries < num_entries; idx_entries++) {
2181                 mess->filters[idx_entries].flags = VFPF_Q_FILTER_DEST_MAC_PRESENT;
2182                 if (is_clear) {
2183                     mess->filters[idx_entries].flags &= ~VFPF_Q_FILTER_SET_MAC;
2184                 } else {
2185                     mess->filters[idx_entries].flags |= VFPF_Q_FILTER_SET_MAC;
2186                 }
2187                 mm_memcpy(mess->filters[idx_entries].mac, pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
2188                 if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER) {
2189                     mess->filters[idx_entries].vlan_tag = vlan_tag;
2190                     mess->filters[idx_entries].flags |= VFPF_Q_FILTER_VLAN_TAG_PRESENT;
2191                 }
2192             }
2193             if (mess->n_mac_vlan_filters) {
2194                 mess->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
2195             }
2196             break;
2197         case Q_FILTER_VLAN:
2198             DbgMessage(pdev,FATAL,"VLAN filter is not supported yet\n");
2199             DbgBreak();
2200             break;
2201         case Q_FILTER_MC:
2202             num_entries = resp->resc.num_mc_filters;
2203             if (!is_clear) {
2204                 num_entries = min((u32_t)num_entries, buf_len/ETHERNET_ADDRESS_SIZE);
2205             }
2206             DbgMessage(pdev, FATAL, "Q_FILTER_MC: %d entries\n", num_entries);
2207             mess->n_multicast = num_entries;
2208             for (idx_entries = 0; idx_entries < num_entries; idx_entries++) {
2209                 if (is_clear) {
2210                     mm_mem_zero(&mess->multicast[idx_entries][0], ETHERNET_ADDRESS_SIZE);
2211                 } else {
2212                     mm_memcpy(&mess->multicast[idx_entries][0], pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
2213                 }
2214             }
2215             if (mess->n_multicast) {
2216                 mess->flags = VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
2217             }
2218             break;
2219         case Q_FILTER_RX_MASK:
2220             DbgBreakIf(is_clear || (buf_len != sizeof(lm_rx_mask_t)));
2221             mess->rx_mask = 0;
2222             rx_mask = (lm_rx_mask_t*)pbuf;
2223             if (GET_FLAGS(*rx_mask, LM_RX_MASK_PROMISCUOUS_MODE)) {
2224                 mess->rx_mask |= (VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST | VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST |
2225                                    VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST);
2226             }
2227             if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_UNICAST)) {
2228                 mess->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
2229             }
2230             if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_MULTICAST)) {
2231                 mess->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
2232             }
2233             if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_ALL_MULTICAST)) {
2234                 mess->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
2235             }
2236             if (GET_FLAGS(*rx_mask, LM_RX_MASK_ACCEPT_BROADCAST)) {
2237                 mess->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
2238             }
2239             mess->flags = VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
2240 
2241             DbgMessage(pdev, FATAL, "Q_FILTER_RX_MASK: mess->rx_mask=%x mess->flags=%x\n", mess->rx_mask, mess->flags);
2242             break;
2243         default:
2244             break;
2245         }
2246         if (mess->flags)
2247         {
2248             send_it = TRUE;
2249         }
2250     }
2251     else
2252     {
2253         DbgBreak();
2254         lm_vf_pf_channel_release_message(pdev,pf_mess);
2255         return LM_STATUS_FAILURE;
2256     }
2257 
2258     if (send_it) {
2259         lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2260         if (lm_status != LM_STATUS_SUCCESS) {
2261             lm_vf_pf_channel_release_message(pdev,pf_mess);
2262         }
2263     } else {
2264         DbgMessage(pdev, FATAL, "lm_vf_pf_set_q_filters: flag is not set. Use bypass\n");
2265         *pf_mess->done = SW_PFVF_STATUS_SUCCESS;
2266         DbgBreakIf(filter_type != Q_FILTER_MC);
2267     }
2268 
2269     return lm_status;
2270 }
2271 
lm_vf_pf_set_q_filters_list(struct _lm_device_t * pdev,u8 vf_qid,void * cookie,q_filter_type filter_type,d_list_t * pbuf,u16_t vlan_tag,u8_t set_mac)2272 lm_status_t lm_vf_pf_set_q_filters_list(struct _lm_device_t * pdev, u8 vf_qid, void * cookie, q_filter_type filter_type, d_list_t * pbuf,
2273                                         u16_t vlan_tag, u8_t set_mac)
2274 
2275 {
2276     DbgMessage(NULL, FATAL, "lm_vf_pf_set_q_filters_list is not used in channel VF\n");
2277     DbgBreak();
2278     return LM_STATUS_FAILURE;
2279 }
2280 
lm_vf_pf_update_rss(struct _lm_device_t * pdev,void * cookie,u32_t rss_flags,u8_t rss_result_mask,u8_t * ind_table,u32_t * rss_key)2281 lm_status_t lm_vf_pf_update_rss(struct _lm_device_t *pdev, void * cookie, u32_t rss_flags, u8_t rss_result_mask, u8_t * ind_table, u32_t * rss_key)
2282 {
2283     lm_status_t lm_status = LM_STATUS_SUCCESS;
2284     lm_vf_pf_message_t * pf_mess = NULL;
2285     u8_t ind_table_idx;
2286 
2287     DbgMessage(pdev, WARNvf, "lm_vf_pf_update_rss\n");
2288 
2289     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && pdev->pf_vf_acquiring_resp));
2290 
2291     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_UPDATE_RSS);
2292     if (!pf_mess) {
2293         lm_status = LM_STATUS_RESOURCE;
2294         DbgBreak();
2295         return lm_status;
2296     }
2297     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2298     {
2299         struct vf_pf_msg_rss * mess = NULL;
2300         mess = (struct vf_pf_msg_rss*)pf_mess->message_virt_addr;
2301         pf_mess->cookie = cookie;
2302         mess->rss_flags = rss_flags;
2303         mess->rss_result_mask = rss_result_mask;
2304         mm_memcpy(mess->ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
2305         mess->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
2306         mm_memcpy(mess->rss_key, rss_key, sizeof(u32_t)*T_ETH_RSS_KEY);
2307         mess->rss_key_size = T_ETH_RSS_KEY;
2308     }
2309     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2310     {
2311         struct vfpf_rss_tlv * mess = NULL;
2312         mess = (struct vfpf_rss_tlv*)pf_mess->message_virt_addr;
2313         pf_mess->cookie = cookie;
2314         mess->rss_flags = rss_flags;
2315         mess->rss_result_mask = rss_result_mask;
2316         for (ind_table_idx = 0; ind_table_idx < T_ETH_INDIRECTION_TABLE_SIZE; ind_table_idx++) {
2317             mess->ind_table[ind_table_idx] = IGU_VF_NDSB(pdev,ind_table[ind_table_idx]);
2318         }
2319         mess->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
2320         mm_memcpy(mess->rss_key, rss_key, sizeof(u32_t)*T_ETH_RSS_KEY);
2321         mess->rss_key_size = T_ETH_RSS_KEY;
2322     }
2323     else
2324     {
2325         DbgBreak();
2326         lm_vf_pf_channel_release_message(pdev,pf_mess);
2327         return LM_STATUS_FAILURE;
2328     }
2329 
2330     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2331     if (lm_status != LM_STATUS_SUCCESS) {
2332         lm_vf_pf_channel_release_message(pdev,pf_mess);
2333     }
2334 
2335     return lm_status;
2336 }
2337 
lm_vf_pf_update_rsc(struct _lm_device_t * pdev)2338 lm_status_t lm_vf_pf_update_rsc(struct _lm_device_t *pdev)
2339 {
2340     lm_status_t lm_status = LM_STATUS_SUCCESS;
2341     lm_vf_pf_message_t * pf_mess = NULL;
2342     u8_t rss_idx = 0;
2343 
2344     DbgMessage(pdev, WARNvf, "lm_vf_pf_update_rsc\n");
2345 
2346     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && pdev->pf_vf_acquiring_resp));
2347 
2348     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_UPDATE_RSC);
2349     if (!pf_mess) {
2350         lm_status = LM_STATUS_RESOURCE;
2351         DbgBreak();
2352         return lm_status;
2353     }
2354     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2355     {
2356         struct vf_pf_msg_rsc * mess = (struct vf_pf_msg_rsc *)pf_mess->message_virt_addr;
2357         mess->rsc_ipv4_state = lm_tpa_ramrod_update_ipvx(pdev, 0, TPA_IPV4_ENABLED);
2358         mess->rsc_ipv6_state = lm_tpa_ramrod_update_ipvx(pdev, 0, TPA_IPV6_ENABLED);
2359     }
2360     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2361     {
2362         struct vfpf_tpa_tlv * mess = (struct vfpf_tpa_tlv*)pf_mess->message_virt_addr;
2363 
2364         LM_FOREACH_RSS_IDX(pdev, rss_idx)
2365         {
2366             mess->tpa_client_info.sge_addr[rss_idx] = LM_TPA_CHAIN_BD(pdev, rss_idx).bd_chain_phy.as_u64;
2367         }
2368 
2369         mess->tpa_client_info.complete_on_both_clients = 1;
2370         mess->tpa_client_info.max_tpa_queues = LM_TPA_MAX_AGGS;
2371         mess->tpa_client_info.max_sges_for_packet = DIV_ROUND_UP_BITS((u16_t)pdev->params.l2_cli_con_params[0].mtu, LM_TPA_PAGE_BITS);
2372         mess->tpa_client_info.sge_buff_size = LM_TPA_PAGE_SIZE;
2373         mess->tpa_client_info.max_agg_size = LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE;
2374         mess->tpa_client_info.sge_pause_thr_low = LM_TPA_SGE_PAUSE_THR_LOW;
2375         mess->tpa_client_info.sge_pause_thr_high = LM_TPA_SGE_PAUSE_THR_HIGH;
2376         mess->tpa_client_info.complete_on_both_clients = TRUE;
2377         mess->tpa_client_info.dont_verify_thr = 0;
2378         mess->tpa_client_info.tpa_mode = TPA_LRO;
2379         mess->tpa_client_info.update_ipv4 = lm_tpa_ramrod_update_ipvx(pdev, 0, TPA_IPV4_ENABLED);
2380         mess->tpa_client_info.update_ipv6 = lm_tpa_ramrod_update_ipvx(pdev, 0, TPA_IPV6_ENABLED);
2381 
2382     }
2383     else
2384     {
2385         DbgBreak();
2386         lm_vf_pf_channel_release_message(pdev,pf_mess);
2387         return LM_STATUS_FAILURE;
2388     }
2389 
2390     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2391     if (lm_status != LM_STATUS_SUCCESS) {
2392         lm_vf_pf_channel_release_message(pdev,pf_mess);
2393     }
2394 
2395     return lm_status;
2396 }
2397 
lm_vf_pf_close_vf(struct _lm_device_t * pdev)2398 lm_status_t lm_vf_pf_close_vf(struct _lm_device_t * pdev)
2399 {
2400     lm_status_t lm_status = LM_STATUS_SUCCESS;
2401     lm_vf_pf_message_t * pf_mess = NULL;
2402 
2403     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)));
2404     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_CLOSE_VF);
2405     if (!pf_mess) {
2406         lm_status = LM_STATUS_RESOURCE;
2407         DbgBreak();
2408         return lm_status;
2409     }
2410     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2411     {
2412         struct vf_pf_msg_close_vf * mess = NULL;
2413         mess = (struct vf_pf_msg_close_vf*)pf_mess->message_virt_addr;
2414         mess->vf_id = ABS_VFID(pdev);
2415     }
2416     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2417     {
2418         struct vfpf_close_tlv * mess = NULL;
2419         mess = (struct vfpf_close_tlv*)pf_mess->message_virt_addr;
2420         mess->vf_id = ABS_VFID(pdev);
2421     }
2422     else
2423     {
2424         DbgBreak();
2425         lm_vf_pf_channel_release_message(pdev,pf_mess);
2426         return LM_STATUS_FAILURE;
2427     }
2428 
2429     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2430     if (lm_status != LM_STATUS_SUCCESS) {
2431         lm_vf_pf_channel_release_message(pdev,pf_mess);
2432     }
2433 
2434     return lm_status;
2435 
2436 }
2437 
lm_vf_pf_release_vf(struct _lm_device_t * pdev)2438 lm_status_t lm_vf_pf_release_vf(struct _lm_device_t * pdev)
2439 {
2440     lm_status_t lm_status = LM_STATUS_SUCCESS;
2441     lm_vf_pf_message_t * pf_mess = NULL;
2442     void* vresp = NULL;
2443 
2444     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev)));
2445 
2446     pf_mess = lm_vf_pf_channel_get_message_to_send(pdev, PFVF_OP_RELEASE_VF);
2447     if (!pf_mess) {
2448         lm_status = LM_STATUS_RESOURCE;
2449         DbgBreak();
2450         return lm_status;
2451     }
2452     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2453     {
2454         struct vf_pf_msg_release_vf * mess = NULL;
2455     mess = (struct vf_pf_msg_release_vf*)pf_mess->message_virt_addr;
2456     mess->vf_id = ABS_VFID(pdev);          /* ME register value */
2457         vresp = (u8_t*)mess + mess->hdr.resp_msg_offset;
2458     }
2459     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2460     {
2461         struct vfpf_release_tlv * mess = NULL;
2462         mess = (struct vfpf_release_tlv*)pf_mess->message_virt_addr;
2463         mess->vf_id = ABS_VFID(pdev);
2464     }
2465     else
2466     {
2467         DbgBreak();
2468         lm_vf_pf_channel_release_message(pdev,pf_mess);
2469         return LM_STATUS_FAILURE;
2470     }
2471     pf_mess->do_not_arm_trigger = TRUE;
2472     lm_status = lm_vf_pf_channel_send(pdev,pf_mess);
2473     if (lm_status != LM_STATUS_SUCCESS) {
2474         lm_vf_pf_channel_release_message(pdev,pf_mess);
2475         return lm_status;
2476     }
2477     lm_status = lm_vf_pf_channel_wait_response(pdev, pf_mess);
2478     // FIXME TODO
2479     if (lm_status == LM_STATUS_SUCCESS) {
2480         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2481         {
2482             struct pf_vf_msg_resp * resp = NULL;
2483             resp = (struct pf_vf_msg_resp *)vresp;
2484         if (resp->hdr.opcode != PFVF_OP_RELEASE_VF) {
2485             lm_status = LM_STATUS_FAILURE;
2486         } else {
2487             switch (resp->hdr.status) {
2488                 case SW_PFVF_STATUS_SUCCESS:
2489                 lm_status = LM_STATUS_SUCCESS;
2490                 break;
2491                 case SW_PFVF_STATUS_FAILURE:
2492                 case SW_PFVF_STATUS_MISMATCH_PF_VF_VERSION:
2493                 case SW_PFVF_STATUS_MISMATCH_FW_HSI:
2494                 case SW_PFVF_STATUS_NO_RESOURCE:
2495                 DbgMessage(pdev, FATAL, "VF_PF Channel: Status %d is not supported yet\n", resp->hdr.status);
2496                 lm_status = LM_STATUS_FAILURE;
2497                 break;
2498             default:
2499                 DbgMessage(pdev, FATAL, "VF_PF Channel: Unknown status %d\n", resp->hdr.status);
2500                 lm_status = LM_STATUS_FAILURE;
2501                 break;
2502             }
2503         }
2504     }
2505     }
2506 
2507 
2508     lm_vf_pf_channel_release_message(pdev, pf_mess);
2509     return lm_status;
2510 }
2511 
lm_vf_fl_reset_set_inprogress(struct _lm_device_t * pdev)2512 void lm_vf_fl_reset_set_inprogress(struct _lm_device_t * pdev)
2513 {
2514     DbgMessage(pdev, WARN, "Set FLR flag is not implemented yet\n");
2515 }
2516 
lm_vf_fl_reset_clear_inprogress(struct _lm_device_t * pdev)2517 void lm_vf_fl_reset_clear_inprogress(struct _lm_device_t *pdev)
2518 {
2519     DbgMessage(pdev, WARN, "Clear FLR flag is not implemented yet\n");
2520 }
2521 
lm_vf_fl_reset_is_inprogress(struct _lm_device_t * pdev)2522 u8_t lm_vf_fl_reset_is_inprogress(struct _lm_device_t *pdev)
2523 {
2524     DbgMessage(pdev, WARN, "Get FLR flag is not implemented yet\n");
2525     return FALSE;
2526 }
2527 
lm_vf_get_vf_id(struct _lm_device_t * pdev)2528 lm_status_t lm_vf_get_vf_id(struct _lm_device_t * pdev)
2529 {
2530     pdev->params.debug_me_register = _vf_reg_rd(pdev,VF_BAR0_DB_OFFSET);;
2531 
2532     DbgMessage(pdev, WARN, "vf ME-REG value: 0x%x\n", pdev->params.debug_me_register);
2533 
2534     if (!(pdev->params.debug_me_register & ME_REG_VF_VALID)) {
2535         DbgBreakIf(!(pdev->params.debug_me_register & ME_REG_VF_VALID));
2536         return LM_STATUS_FAILURE;
2537     }
2538     pdev->params.vf_num_in_path = (pdev->params.debug_me_register & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
2539     DbgMessage(pdev, WARN, "vf_num_in_path=%d\n", pdev->params.vf_num_in_path);
2540     return LM_STATUS_SUCCESS;
2541 }
2542 
lm_vf_setup_alloc_resc(struct _lm_device_t * pdev,u8_t b_is_alloc)2543 lm_status_t lm_vf_setup_alloc_resc(struct _lm_device_t *pdev, u8_t b_is_alloc )
2544 {
2545     lm_variables_t* vars       = NULL ;
2546     u32_t           mem_size   = 0 ;
2547     //u32_t           alloc_size = 0 ;
2548     u8_t            mm_cli_idx = 0 ;
2549     u8_t            sb_id      = 0 ;
2550     lm_address_t    sb_phy_address;
2551     void * p_sb;
2552     DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && pdev->pf_vf_acquiring_resp));
2553     //DbgBreakIf(!(presp && (sb_id < presp->pfdev_info.indices_per_sb)));
2554 
2555     if CHK_NULL( pdev )
2556     {
2557         return LM_STATUS_INVALID_PARAMETER ;
2558     }
2559 
2560     DbgMessage(pdev, FATAL, "### VF lm_common_setup_alloc_resc b_is_alloc=%s\n", b_is_alloc ? "TRUE" : "FALSE" );
2561 
2562     vars       = &(pdev->vars) ;
2563 
2564     //       Status blocks allocation. We allocate mem both for the default and non-default status blocks
2565     //       there is 1 def sb and 16 non-def sb per port.
2566     //       non-default sb: index 0-15, default sb: index 16.
2567     if (IS_CHANNEL_VFDEV(pdev)) {
2568         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2569         {
2570             struct pf_vf_msg_acquire_resp * presp;
2571             presp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
2572         mem_size = (sizeof(u16_t) * (presp->pfdev_info.indices_per_sb + 2) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
2573         }
2574         else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2575         {
2576             struct pfvf_acquire_resp_tlv * presp;
2577             presp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;
2578             mem_size = (sizeof(u16_t) * (presp->pfdev_info.indices_per_sb + 2) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
2579         }
2580         else
2581         {
2582             DbgBreak();
2583             return LM_STATUS_FAILURE;
2584         }
2585     } else {
2586         mem_size = E2_STATUS_BLOCK_BUFFER_SIZE;
2587     }
2588 
2589     mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
2590 
2591     LM_FOREACH_SB_ID(pdev, sb_id)
2592     {
2593         if( b_is_alloc )
2594         {
2595             if (IS_CHANNEL_VFDEV(pdev)) {
2596                 pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb = p_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
2597                 pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u32.low = sb_phy_address.as_u32.low;
2598                 pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u32.high = sb_phy_address.as_u32.high;
2599             } else {
2600                 pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb = p_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
2601                 pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
2602                 pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
2603             }
2604         }
2605         else
2606         {
2607             if (IS_CHANNEL_VFDEV(pdev)) {
2608                 p_sb = (void *)pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb;
2609             } else {
2610                 p_sb = (void *)pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb;
2611             }
2612         }
2613 
2614         if CHK_NULL(p_sb)
2615         {
2616             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2617             return LM_STATUS_RESOURCE ;
2618         }
2619         mm_mem_zero(p_sb, mem_size);
2620     }
2621     lm_reset_sb_ack_values(pdev);
2622 
2623     mm_mem_zero(pdev->debug_info.ack_dis,     sizeof(pdev->debug_info.ack_dis));
2624     mm_mem_zero(pdev->debug_info.ack_en,      sizeof(pdev->debug_info.ack_en));
2625     mm_mem_zero(pdev->debug_info.rx_only_int, sizeof(pdev->debug_info.rx_only_int));
2626     mm_mem_zero(pdev->debug_info.tx_only_int, sizeof(pdev->debug_info.tx_only_int));
2627     mm_mem_zero(pdev->debug_info.both_int,    sizeof(pdev->debug_info.both_int));
2628     mm_mem_zero(pdev->debug_info.empty_int,   sizeof(pdev->debug_info.empty_int));
2629     mm_mem_zero(pdev->debug_info.false_int,   sizeof(pdev->debug_info.false_int));
2630 
2631 #if 0
2632     //CAM
2633     alloc_size = sizeof(struct mac_configuration_cmd) ;
2634 
2635     if( b_is_alloc )
2636     {
2637         pdev->params.mac_config[LM_CLI_IDX_NDIS] = mm_alloc_phys_mem(pdev,
2638                                                     alloc_size,
2639                                                     &pdev->params.mac_config_phy[LM_CLI_IDX_NDIS],
2640                                                     0,
2641                                                     mm_cli_idx);
2642     }
2643     if CHK_NULL( pdev->params.mac_config[LM_CLI_IDX_NDIS] )
2644     {
2645         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2646         return LM_STATUS_RESOURCE ;
2647     }
2648 
2649     mm_mem_zero((void *) (pdev->params.mac_config[LM_CLI_IDX_NDIS]), alloc_size );
2650 
2651     if( b_is_alloc )
2652     {
2653         pdev->params.mcast_config = mm_alloc_phys_mem(pdev,
2654                                                       alloc_size,
2655                                                       &pdev->params.mcast_config_phy,
2656                                                       0,
2657                                                       mm_cli_idx);
2658     }
2659 
2660     if CHK_NULL( pdev->params.mcast_config )
2661     {
2662         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2663         return LM_STATUS_RESOURCE ;
2664     }
2665     mm_mem_zero((void *) (pdev->params.mcast_config), alloc_size);
2666 #endif
2667     return LM_STATUS_SUCCESS;
2668 }
2669 
2670 
lm_vf_chip_init(struct _lm_device_t * pdev)2671 lm_status_t lm_vf_chip_init(struct _lm_device_t *pdev)
2672 {
2673     lm_status_t lm_status;
2674 
2675     DbgMessage(pdev, WARNvf, "lm_vf_chip_init\n");
2676 
2677     lm_status = lm_vf_pf_init_vf(pdev);
2678     if (lm_status == LM_STATUS_SUCCESS) {
2679         lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
2680 #if 0
2681 "ACK_enable" (even "ACK_disable/ACK_enable") does not help when IGU block is stuck from previous VM shutdown/reboot (not ACKed sunbitted interrupt interrupt).
2682 Windows8 PF executes clear IGU block on VM initialization. Must be checked for Linux PF.
2683         if (lm_status == LM_STATUS_SUCCESS)
2684         {
2685             u8_t sb_id;
2686             u8_t igu_sb_cnt;
2687 
2688             igu_sb_cnt = LM_IGU_SB_CNT(pdev);
2689             for (sb_id = 0; sb_id < igu_sb_cnt; sb_id++)
2690             {
2691                 /* Give Consumer updates with value '0' */
2692                 lm_int_igu_ack_sb(pdev, IGU_VF_NDSB(pdev,sb_id), IGU_SEG_ACCESS_NORM, 0, IGU_INT_DISABLE, 0);
2693                 lm_int_igu_ack_sb(pdev, IGU_VF_NDSB(pdev,sb_id), IGU_SEG_ACCESS_NORM, 0, IGU_INT_ENABLE, 1);
2694             }
2695         }
2696 #endif
2697     }
2698 
2699 
2700     /* Temporary FIXME TODO: is this the right location??? */
2701     pdev->vars.cable_is_attached = TRUE;
2702     pdev->vars.link_status = LM_STATUS_LINK_ACTIVE;
2703 	if (IS_HW_CHANNEL_VIRT_MODE(pdev) && pdev->vars.pf_link_speed)
2704 	{
2705         switch(pdev->vars.pf_link_speed)
2706         {
2707         case 10:
2708             SET_MEDIUM_SPEED(pdev->vars.medium, LM_MEDIUM_SPEED_10MBPS);
2709             break;
2710 
2711         case 100:
2712             SET_MEDIUM_SPEED(pdev->vars.medium, LM_MEDIUM_SPEED_100MBPS);
2713             break;
2714 
2715         case 1000:
2716             SET_MEDIUM_SPEED(pdev->vars.medium, LM_MEDIUM_SPEED_1000MBPS);
2717             break;
2718 
2719         case 2500:
2720             SET_MEDIUM_SPEED(pdev->vars.medium, LM_MEDIUM_SPEED_2500MBPS);
2721             break;
2722 
2723         case 20000:
2724             SET_MEDIUM_SPEED(pdev->vars.medium, LM_MEDIUM_SPEED_20GBPS);
2725             break;
2726 
2727 		case 10000:
2728         default:
2729             SET_MEDIUM_SPEED(pdev->vars.medium, LM_MEDIUM_SPEED_10GBPS);
2730         break;
2731         }
2732 	}
2733 	else
2734 	{
2735         SET_MEDIUM_SPEED(pdev->vars.medium,LM_MEDIUM_SPEED_10GBPS);
2736 	}
2737 
2738 
2739     DbgMessage(pdev, WARNvf, "lm_vf_chip_init lm_status = %d\n", lm_status);
2740     return lm_status;
2741 }
2742 
lm_vf_queue_init(struct _lm_device_t * pdev,u8_t cid)2743 lm_status_t lm_vf_queue_init(struct _lm_device_t *pdev, u8_t cid)
2744 {
2745     lm_status_t lm_status;
2746     u8_t validation_flag = 0;
2747     u8_t q_index = LM_SW_CID_TO_SW_QID(pdev,cid);
2748 
2749     if (IS_SW_CHANNEL_VIRT_MODE(pdev))
2750     {
2751         struct pf_vf_msg_acquire_resp * presp;
2752     presp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
2753 
2754     if (q_index < presp->resc.num_rxqs) {
2755         validation_flag |= RX_Q_VALIDATE;
2756     }
2757 
2758     if (q_index < presp->resc.num_txqs) {
2759         validation_flag |= TX_Q_VALIDATE;
2760     }
2761     }
2762     else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
2763     {
2764         struct pfvf_acquire_resp_tlv * presp;
2765         presp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;
2766         if (q_index < presp->resc.num_rxqs) {
2767             validation_flag |= RX_Q_VALIDATE;
2768         }
2769 
2770         if (q_index < presp->resc.num_txqs) {
2771             validation_flag |= TX_Q_VALIDATE;
2772         }
2773     }
2774     else
2775     {
2776         DbgBreak();
2777         return LM_STATUS_FAILURE;
2778     }
2779 
2780     DbgMessage(pdev, WARNvf, "validation_flag = %d\n", validation_flag);
2781 
2782     lm_status = lm_vf_pf_setup_q(pdev, q_index, validation_flag);
2783     if (lm_status == LM_STATUS_SUCCESS) {
2784         lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
2785     }
2786     if (lm_status == LM_STATUS_SUCCESS) {
2787         lm_set_con_state(pdev, cid, LM_CON_STATE_OPEN);
2788     }
2789     return lm_status;
2790 
2791 }
2792 
lm_vf_queue_close(struct _lm_device_t * pdev,u8_t cid)2793 lm_status_t lm_vf_queue_close(struct _lm_device_t *pdev, u8_t cid)
2794 {
2795     lm_status_t lm_status = LM_STATUS_SUCCESS;
2796     u8_t q_index = LM_SW_CID_TO_SW_QID(pdev,cid);
2797 
2798     if (lm_reset_is_inprogress(pdev)) {
2799         lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
2800         return LM_STATUS_SUCCESS;
2801     }
2802 
2803     if (lm_get_con_state(pdev, cid) == LM_CON_STATE_OPEN) {
2804         lm_status = lm_vf_pf_tear_q_down(pdev, q_index);
2805         if (lm_status == LM_STATUS_SUCCESS) {
2806             lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
2807         } else {
2808             if (lm_status == LM_STATUS_REQUEST_NOT_ACCEPTED) {
2809                lm_status = LM_STATUS_SUCCESS;
2810             }
2811         }
2812             if (lm_status == LM_STATUS_SUCCESS) {
2813                 lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
2814             }
2815         }
2816     return lm_status;
2817 }
2818 
lm_vf_chip_reset(struct _lm_device_t * pdev,lm_reason_t reason)2819 lm_status_t lm_vf_chip_reset(struct _lm_device_t *pdev, lm_reason_t reason)
2820 {
2821     lm_status_t lm_status = LM_STATUS_SUCCESS;
2822 
2823     if (lm_reset_is_inprogress(pdev)) {
2824         return LM_STATUS_SUCCESS;
2825     }
2826 
2827     lm_status = lm_vf_pf_close_vf(pdev);
2828     if (lm_status == LM_STATUS_SUCCESS) {
2829         lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
2830     }
2831 
2832     return lm_status;
2833 }
2834 
lm_vf_is_function_after_flr(struct _lm_device_t * pdev)2835 u8_t lm_vf_is_function_after_flr(struct _lm_device_t * pdev)
2836 {
2837     return FALSE;
2838 }
2839 
lm_vf_init_dev_info(struct _lm_device_t * pdev)2840 lm_status_t lm_vf_init_dev_info(struct _lm_device_t *pdev)
2841 {
2842     DbgMessage(pdev, WARN, "lm_vf_init_dev_info>>\n");
2843     // Cleaning after driver unload
2844     pdev->context_info = NULL;
2845     mm_mem_zero((void *) &pdev->cid_recycled_callbacks, sizeof(pdev->cid_recycled_callbacks));
2846     mm_mem_zero((void *) &pdev->toe_info, sizeof(pdev->toe_info));
2847 
2848     return LM_STATUS_SUCCESS;
2849 }
2850 
2851 
lm_vf_recycle_resc_in_pf(struct _lm_device_t * pdev)2852 lm_status_t lm_vf_recycle_resc_in_pf(struct _lm_device_t *pdev)
2853 {
2854     DbgMessage(NULL, WARN, "lm_vf_recycle_resc_in_pf is used only in basic VF\n");
2855     return LM_STATUS_SUCCESS;
2856 }
2857 
2858 
lm_vf_enable_vf(struct _lm_device_t * pdev)2859 lm_status_t lm_vf_enable_vf(struct _lm_device_t *pdev)
2860 {
2861     DbgMessage(NULL, WARN, "lm_vf_enable_vf is used only in basic VF\n");
2862     return LM_STATUS_SUCCESS;
2863 }
2864 
lm_vf_enable_igu_int(struct _lm_device_t * pdev)2865 lm_status_t lm_vf_enable_igu_int(struct _lm_device_t * pdev)
2866 {
2867     return LM_STATUS_SUCCESS;
2868 }
2869 
2870 
lm_vf_disable_igu_int(struct _lm_device_t * pdev)2871 lm_status_t lm_vf_disable_igu_int(struct _lm_device_t * pdev)
2872 {
2873     /* TODO?? */
2874     return LM_STATUS_SUCCESS;
2875 }
2876 
lm_vf_check_hw_back_channel(struct _lm_device_t * pdev)2877 pfvf_bb_event_type lm_vf_check_hw_back_channel(struct _lm_device_t * pdev)
2878 {
2879     struct pf_vf_bulletin_content volatile *bulletin = (struct pf_vf_bulletin_content *)pdev->vars.vf_pf_mess.bulletin_virt_addr;
2880     u32_t attempts;
2881 
2882     if (bulletin == NULL)
2883     {
2884          DbgMessage(pdev, FATAL, "PF to VF channel is not active\n");
2885          return PFVF_BB_CHANNEL_IS_NOT_ACTIVE;
2886     }
2887     if (pdev->vars.vf_pf_mess.old_version != bulletin->version)
2888     {
2889         for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++)
2890         {
2891             if ((bulletin->length >= sizeof(bulletin->crc)) && (bulletin->length <= sizeof(union pf_vf_bulletin))
2892                     && (bulletin->crc == mm_crc32((u8_t*)bulletin + sizeof(bulletin->crc), bulletin->length - sizeof(bulletin->crc), BULLETIN_CRC_SEED)))
2893             break;
2894         }
2895         if (attempts == BULLETIN_ATTEMPTS)
2896         {
2897             DbgMessage(pdev, FATAL, "PF to VF channel: CRC error\n");
2898             return PFVF_BB_CHANNEL_CRC_ERR;
2899         }
2900         pdev->vars.vf_pf_mess.old_version = bulletin->version;
2901         if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2902         {
2903             DbgMessage(pdev, FATAL, "PF to VF channel: PF provides VLAN\n");
2904         }
2905         if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2906         {
2907             if ((bulletin->mac[0] != pdev->params.mac_addr[0])
2908                     || (bulletin->mac[1] != pdev->params.mac_addr[1])
2909                     || (bulletin->mac[2] != pdev->params.mac_addr[2])
2910                     || (bulletin->mac[3] != pdev->params.mac_addr[3])
2911                     || (bulletin->mac[4] != pdev->params.mac_addr[4])
2912                     || (bulletin->mac[5] != pdev->params.mac_addr[5]))
2913             {
2914                 DbgMessage(pdev, FATAL, "PF to VF channel: PF provides new MAC\n");
2915 		return PFVF_BB_VALID_MAC;
2916             }
2917         }
2918     }
2919     return PFVF_BB_NO_UPDATE;
2920 }
lm_pf_enable_vf_igu_int(struct _lm_device_t * pdev,u8_t abs_vf_id)2921 lm_status_t lm_pf_enable_vf_igu_int(struct _lm_device_t * pdev, u8_t abs_vf_id)
2922 {
2923     lm_status_t lm_status = LM_STATUS_SUCCESS;
2924     u32_t val;
2925     u16_t pretend_val;
2926     u8_t num_segs;
2927     u8_t prod_idx;
2928     u8_t sb_id;
2929     u8_t i;
2930     u8_t igu_sb_cnt;
2931 
2932     lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_abs_id(pdev, abs_vf_id);
2933 
2934 
2935     /* Need to use pretend for VF */
2936     pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (abs_vf_id << 4);
2937     lm_pretend_func(PFDEV(pdev), pretend_val);
2938 
2939     REG_WR(PFDEV(pdev), IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
2940     REG_WR(PFDEV(pdev), IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
2941     REG_WR(PFDEV(pdev), IGU_REG_SB_MASK_LSB, 0);
2942     REG_WR(PFDEV(pdev), IGU_REG_SB_MASK_MSB, 0);
2943     REG_WR(PFDEV(pdev), IGU_REG_PBA_STATUS_LSB, 0);
2944     REG_WR(PFDEV(pdev), IGU_REG_PBA_STATUS_MSB, 0);
2945 
2946 
2947     val=REG_RD(PFDEV(pdev), IGU_REG_VF_CONFIGURATION);
2948 
2949     SET_FLAGS(val, IGU_VF_CONF_FUNC_EN);
2950     SET_FLAGS(val, IGU_VF_CONF_MSI_MSIX_EN);
2951 
2952     if (pdev->params.interrupt_mode == LM_INT_MODE_SIMD) {
2953         SET_FLAGS(val,IGU_VF_CONF_SINGLE_ISR_EN);
2954     }
2955 
2956     /* set Parent PF */
2957     val |= ((FUNC_ID(pdev) << IGU_VF_CONF_PARENT_SHIFT) & IGU_VF_CONF_PARENT_MASK);
2958 
2959     REG_WR(PFDEV(pdev),  IGU_REG_VF_CONFIGURATION, val);
2960 
2961     igu_sb_cnt = vf_info->num_allocated_chains; // pdev->hw_info.intr_blk_info.igu_info.vf_igu_info[abs_vf_id].igu_sb_cnt;
2962     num_segs = IGU_NORM_NDSB_NUM_SEGS;
2963     for (sb_id = 0; sb_id < igu_sb_cnt; sb_id++) {
2964         prod_idx = LM_VF_IGU_SB_ID(vf_info,sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
2965         for (i = 0; i < num_segs;i++) {
2966             REG_WR(PFDEV(pdev), IGU_REG_PROD_CONS_MEMORY + (prod_idx + i)*4, 0);
2967         }
2968         SB_RX_INDEX(pdev,LM_VF_IGU_SB_ID(vf_info,sb_id)) = 0;
2969         lm_int_ack_sb_enable(pdev, LM_VF_IGU_SB_ID(vf_info,sb_id));
2970         lm_pf_int_vf_igu_sb_cleanup(pdev, vf_info, sb_id);
2971     }
2972 
2973     lm_status = lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev));
2974     return lm_status;
2975 }
2976 
lm_pf_disable_vf_igu_int(struct _lm_device_t * pdev,u8_t abs_vf_id)2977 lm_status_t lm_pf_disable_vf_igu_int(struct _lm_device_t * pdev,  u8_t abs_vf_id)
2978 {
2979     lm_status_t lm_status = LM_STATUS_SUCCESS;
2980     u32_t val;
2981     u16_t pretend_val;
2982 
2983     /* Need to use pretend for VF */
2984     if (lm_fl_reset_is_inprogress(PFDEV(pdev))) {
2985         DbgMessage(pdev, FATAL, "PF[%d] of VF[%d] is under FLR\n", FUNC_ID(pdev), abs_vf_id);
2986         return LM_STATUS_SUCCESS;
2987     }
2988     pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (abs_vf_id << 4);
2989     lm_pretend_func(PFDEV(pdev), pretend_val);
2990 
2991     val = REG_RD(PFDEV(pdev), IGU_REG_VF_CONFIGURATION);
2992 
2993     /* disable both bits, for INTA, MSI and MSI-X. */
2994     RESET_FLAGS(val, (IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK));
2995 
2996     REG_WR(PFDEV(pdev),  IGU_REG_VF_CONFIGURATION, val);
2997 
2998     lm_status = lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev));
2999     return lm_status;
3000 }
3001 
3002 lm_status_t
lm_pf_enable_vf(struct _lm_device_t * pdev,u8_t abs_vf_id)3003 lm_pf_enable_vf(struct _lm_device_t *pdev,   u8_t abs_vf_id)
3004 {
3005     lm_status_t lm_status = LM_STATUS_SUCCESS;
3006     u16_t pretend_val;
3007     u32_t prod_idx;
3008     u8_t igu_sb_id;
3009     u32_t was_err_num;
3010     u32_t was_err_value;
3011     u32_t was_err_reg;
3012     u8_t    igu_sb_cnt;
3013 
3014     lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_abs_id(pdev, abs_vf_id);
3015 
3016     /* Enable the VF in PXP - this will enable read/write from VF bar.
3017      * Need to use Pretend in order to do this. Note: once we do pretend
3018      * all accesses to SPLIT-68 will be done as if-vf...
3019      * Bits. Bits [13:10] - Reserved.  Bits [9:4] - VFID. Bits [3] - VF valid. Bits [2:0] - PFID.
3020      */
3021 
3022     pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (abs_vf_id << 4);
3023     lm_status = lm_pretend_func(PFDEV(pdev), pretend_val);
3024     if (lm_status == LM_STATUS_SUCCESS) {
3025         REG_WR(PFDEV(pdev), PBF_REG_DISABLE_VF,0);
3026         REG_WR(PFDEV(pdev), PGLUE_B_REG_INTERNAL_VFID_ENABLE, 1);
3027         lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev) );
3028         DbgMessage(pdev, FATAL, "vf[%d] is enabled\n", abs_vf_id);
3029 
3030         was_err_num = 2 * PATH_ID(pdev) + abs_vf_id / 32;
3031         switch (was_err_num) {
3032         case 0:
3033             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
3034             break;
3035         case 1:
3036             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
3037             break;
3038         case 2:
3039             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
3040             break;
3041         case 3:
3042             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
3043             break;
3044         default:
3045             was_err_reg = 0;
3046             DbgMessage(pdev, FATAL,"Wrong Path[%d], VF[%d]\n",PATH_ID(pdev),abs_vf_id);
3047             DbgBreak();
3048         }
3049 
3050         was_err_value = 1 << (abs_vf_id % 32);
3051         if (was_err_reg) {
3052             REG_WR(PFDEV(pdev), was_err_reg, was_err_value); /* PglueB - Clear the was_error indication of the relevant function*/
3053         }
3054 
3055         /* IGU Initializations */
3056         igu_sb_cnt = vf_info->num_allocated_chains;
3057         for (igu_sb_id = 0; igu_sb_id < igu_sb_cnt; igu_sb_id++) {
3058             prod_idx = LM_VF_IGU_SB_ID(vf_info, igu_sb_id);
3059             REG_WR(PFDEV(pdev), IGU_REG_PROD_CONS_MEMORY + prod_idx*4, 0);
3060             DbgMessage(pdev, FATAL, "IGU[%d] is inialized\n", prod_idx);
3061         }
3062         REG_WR(PFDEV(pdev),TSEM_REG_VFPF_ERR_NUM, abs_vf_id);
3063         REG_WR(PFDEV(pdev),USEM_REG_VFPF_ERR_NUM, abs_vf_id);
3064         REG_WR(PFDEV(pdev),CSEM_REG_VFPF_ERR_NUM, abs_vf_id);
3065         REG_WR(PFDEV(pdev),XSEM_REG_VFPF_ERR_NUM, abs_vf_id);
3066     } else {
3067         DbgMessage(pdev, FATAL, "lm_pretend_func(%x) returns %d\n",pretend_val,lm_status);
3068         DbgMessage(pdev, FATAL, "vf[%d] is not enabled\n", abs_vf_id);
3069         DbgBreak();
3070     }
3071 
3072     return lm_status;
3073 }
3074 
3075 lm_status_t
lm_pf_disable_vf(struct _lm_device_t * pdev,u8_t abs_vf_id)3076 lm_pf_disable_vf(struct _lm_device_t *pdev,   u8_t abs_vf_id)
3077 {
3078     lm_status_t lm_status = LM_STATUS_SUCCESS;
3079     u16_t pretend_val;
3080 
3081     if (lm_pf_fl_vf_reset_is_inprogress(pdev,abs_vf_id)) {
3082         DbgMessage(pdev, FATAL, "vf disable called on a flred function - not much we can do here... \n");
3083         return LM_STATUS_SUCCESS;
3084     }
3085     pretend_val = ABS_FUNC_ID(pdev) | (1<<3) | (abs_vf_id << 4);
3086     lm_status = lm_pretend_func(PFDEV(pdev), pretend_val);
3087     if (lm_status == LM_STATUS_SUCCESS) {
3088         REG_WR(PFDEV(pdev), PBF_REG_DISABLE_VF,1);
3089         REG_WR(PFDEV(pdev), PGLUE_B_REG_INTERNAL_VFID_ENABLE, 0);
3090         lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev) );
3091         DbgMessage(pdev, FATAL, "vf[%d] is disbled\n", abs_vf_id);
3092     } else {
3093         DbgMessage(pdev, FATAL, "lm_pretend_func(%x) returns %d\n",pretend_val,lm_status);
3094         DbgMessage(pdev, FATAL, "vf[%d] is not enabled\n", abs_vf_id);
3095     }
3096 
3097     return lm_status;
3098 }
3099 
3100 /*Master Channel Virt*/
3101 
lm_pf_create_vf(struct _lm_device_t * pdev,u16_t abs_vf_id,void * ctx)3102 lm_status_t lm_pf_create_vf(struct _lm_device_t *pdev, u16_t abs_vf_id, void* ctx)
3103 {
3104     lm_status_t lm_status = LM_STATUS_SUCCESS;
3105     u8_t chains_resource_acquired;
3106     u8_t base_fw_stat_id;
3107     lm_vf_info_t * vf_info;
3108     u32_t   num_of_vf_avaiable_chains;
3109     u8_t    num_rxqs,num_txqs;
3110 
3111     DbgMessage(pdev, WARN, "lm_pf_create_vf(%d)\n",abs_vf_id);
3112     vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3113     if (!vf_info) {
3114         DbgBreakMsg("lm_pf_create_vf: vf_info is not found\n");
3115         return LM_STATUS_FAILURE;
3116     }
3117     lm_status = lm_pf_set_vf_ctx(pdev, vf_info->relative_vf_id, ctx);
3118     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
3119 
3120     DbgBreakIf(!vf_info);
3121 
3122     lm_pf_get_queues_number(pdev, vf_info, &num_rxqs, &num_txqs);
3123     num_of_vf_avaiable_chains = lm_pf_allocate_vf_igu_sbs(pdev, vf_info, num_rxqs);
3124 
3125     if (num_of_vf_avaiable_chains == 0)
3126     {
3127         return LM_STATUS_RESOURCE;
3128     }
3129 
3130     chains_resource_acquired = lm_pf_acquire_vf_chains_resources(pdev, vf_info->relative_vf_id, num_of_vf_avaiable_chains);
3131 
3132     if (!chains_resource_acquired) {
3133         DbgBreak();
3134         return LM_STATUS_RESOURCE;
3135     }
3136 
3137 
3138     if (vf_info != NULL) {
3139         base_fw_stat_id = 8 + vf_info->abs_vf_id;
3140         lm_status = lm_pf_set_vf_stat_id(pdev, vf_info->relative_vf_id, base_fw_stat_id);
3141         lm_pf_init_vf_slow_path(pdev, vf_info);
3142         lm_pf_init_vf_client(pdev, vf_info, 0);
3143 #if 0
3144         lm_status = lm_set_rx_mask(pdev, vf_info->vf_chains[0].sw_client_id, LM_RX_MASK_ACCEPT_NONE, NULL);
3145 
3146         if (lm_status == LM_STATUS_PENDING)
3147         {
3148             /* Synchrounous complete */
3149             lm_status = lm_wait_set_rx_mask_done(pdev, vf_info->vf_chains[0].sw_client_id);
3150         }
3151 #endif
3152         lm_status = lm_pf_enable_vf(pdev, vf_info->abs_vf_id);
3153     } else {
3154         lm_status = LM_STATUS_FAILURE;
3155     }
3156 
3157     return lm_status;
3158 }
3159 
lm_pf_remove_vf(struct _lm_device_t * pdev,u16_t abs_vf_id)3160 lm_status_t lm_pf_remove_vf(struct _lm_device_t *pdev, u16_t abs_vf_id)
3161 {
3162     lm_status_t     lm_status = LM_STATUS_SUCCESS;
3163     u8_t            q_idx;
3164     u32_t           cid,client_info_idx, con_state;
3165     lm_vf_info_t *  vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3166 
3167     DbgMessage(pdev, WARN, "lm_pf_remove_vf(%d)\n",abs_vf_id);
3168     if (!vf_info) {
3169         DbgBreakMsg("lm_pf_remove_vf: vf_info is not found\n");
3170         return LM_STATUS_FAILURE;
3171     }
3172     if (lm_pf_fl_vf_reset_is_inprogress(pdev, (u8_t)abs_vf_id)) {
3173         MM_ACQUIRE_VFS_STATS_LOCK(pdev);
3174         if (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_IN_PROCESSING) {
3175             vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
3176         }
3177         vf_info->vf_stats.stop_collect_stats = TRUE;
3178         vf_info->vf_stats.vf_stats_flag = 0;
3179         MM_RELEASE_VFS_STATS_LOCK(pdev);
3180         lm_status = lm_pf_vf_wait_for_stats_ready(pdev, vf_info);
3181         if (lm_status != LM_STATUS_SUCCESS) {
3182             DbgBreak();
3183         } else {
3184             vf_info->vf_stats.vf_stats_state = VF_STATS_NONE;
3185         }
3186 
3187         for (q_idx = 0; q_idx < vf_info->vf_si_num_of_active_q; q_idx++) {
3188             cid = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_idx);
3189             client_info_idx = LM_SW_VF_CLI_ID(vf_info, q_idx);
3190             con_state = lm_get_con_state(pdev, cid);
3191             if (con_state != LM_CON_STATE_CLOSE)
3192             {
3193                 if (con_state != LM_CON_STATE_OPEN) {
3194                     DbgMessage(pdev, FATAL, "State of CID %d of VF[%d(rel)] is %d)\n",cid, vf_info->relative_vf_id,
3195                                 con_state);
3196                     DbgBreak();
3197                 } else {
3198                     lm_set_con_state(pdev, cid, LM_CON_STATE_HALT);
3199                     lm_status = lm_terminate_eth_con(pdev, cid);
3200                     DbgMessage(pdev, WARN, "lm_pf_remove_vf(%d): terminate CID %d (0x%x)\n",abs_vf_id,cid,lm_status);
3201                     if (lm_status != LM_STATUS_SUCCESS)
3202                     {
3203                         DbgBreak();
3204                         return lm_status;
3205                     }
3206                     lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
3207                 }
3208             }
3209         }
3210         vf_info->vf_si_num_of_active_q = 0;
3211         lm_status = lm_pf_cleanup_vf_after_flr(pdev, vf_info);
3212     } else {
3213         lm_status = lm_pf_disable_vf(pdev,vf_info->abs_vf_id);
3214     }
3215 
3216     lm_pf_release_vf_chains_resources(pdev, vf_info->relative_vf_id);
3217     lm_status = lm_pf_set_vf_ctx(pdev, vf_info->relative_vf_id, NULL);
3218     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
3219 
3220 
3221     return lm_status;
3222 }
3223 
lm_pf_cleanup_vf_after_flr(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)3224 lm_status_t lm_pf_cleanup_vf_after_flr(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
3225 {
3226     lm_status_t lm_status  = LM_STATUS_SUCCESS;
3227     u32_t wait_ms          = 10000;
3228     u16_t pretend_value    = 0;
3229     u32_t factor           = 0;
3230     u32_t cleanup_complete = 0;
3231 
3232     u8_t  function_for_clean_up = 0;
3233     u8_t  idx                   = 0;
3234 
3235     struct sdm_op_gen final_cleanup;
3236 
3237     // TODO - use here pdev->vars.clk_factor
3238     if (CHIP_REV_IS_EMUL(pdev))
3239     {
3240             factor = LM_EMUL_FACTOR;
3241     }
3242     else if (CHIP_REV_IS_FPGA(pdev))
3243     {
3244             factor = LM_FPGA_FACTOR;
3245     }
3246     else
3247     {
3248             factor = 1;
3249     }
3250 
3251     wait_ms *= factor;
3252     pdev->flr_stats.default_wait_interval_ms = DEFAULT_WAIT_INTERVAL_MICSEC;
3253     DbgMessage(pdev, FATAL, "lm_cleanup_after_flr VF[%d] >>>\n",vf_info->abs_vf_id);
3254 
3255 /*
3256 VF FLR only part
3257 a.  Wait until there are no pending ramrods for this VFid in the PF DB. - No pending VF's pending ramrod. It's based on "FLR not during driver load/unload".
3258 What about set MAC?
3259 
3260 b.  Send the new "L2 connection terminate" ramrod for each L2 CID that was used by the VF,
3261 including sending the doorbell with the "terminate" flag. - Will be implemented in FW later
3262 
3263 c.  Send CFC delete ramrod on all L2 connections of that VF (set the CDU-validation field to "invalid"). - part of FW cleanup. VF_TO_PF_CID must initialized in
3264 PF CID array*/
3265 
3266 /*  3.  Poll on the DQ per-function usage-counter until it's 0. */
3267     pretend_value = ABS_FUNC_ID(pdev) | (1<<3) | (vf_info->abs_vf_id << 4);
3268     lm_status = lm_pretend_func(PFDEV(pdev), pretend_value);
3269     if (lm_status == LM_STATUS_SUCCESS) {
3270         pdev->flr_stats.dq_usage_counter = REG_WAIT_VERIFY_VAL(PFDEV(pdev), DORQ_REG_VF_USAGE_CNT, 0, wait_ms);
3271         lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev));
3272         DbgMessage(pdev, FATAL, "%d*%dms waiting for DQ per vf usage counter\n", pdev->flr_stats.dq_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
3273     } else {
3274         DbgMessage(pdev, FATAL,"lm_pretend_func(%x) returns %d\n",pretend_value,lm_status);
3275         DbgMessage(pdev, FATAL, "VF[%d]: could not read DORQ_REG_VF_USAGE_CNT\n", ABS_VFID(pdev));
3276         return lm_status;
3277     }
3278 
3279 /*  4.  Activate the FW cleanup process by activating AggInt in the FW with GRC. Set the bit of the relevant function in the AggInt bitmask,
3280         to indicate to the FW which function is being cleaned. Wait for the per-function completion indication in the Cstorm RAM
3281 */
3282     function_for_clean_up = 8 + vf_info->abs_vf_id;
3283     cleanup_complete = 0xFFFFFFFF;
3284     LM_INTMEM_READ32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),&cleanup_complete, BAR_CSTRORM_INTMEM);
3285     if (cleanup_complete) {
3286         DbgMessage(pdev, FATAL, "CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET is %x",cleanup_complete);
3287         DbgBreak();
3288     }
3289 
3290     final_cleanup.command = (XSTORM_AGG_INT_FINAL_CLEANUP_INDEX << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM;
3291     final_cleanup.command |= (XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE;
3292     final_cleanup.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
3293     final_cleanup.command |= (function_for_clean_up << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX;
3294 
3295     DbgMessage(pdev, FATAL, "Final cleanup\n");
3296     REG_WR(PFDEV(pdev),XSDM_REG_OPERATION_GEN, final_cleanup.command);
3297     pdev->flr_stats.final_cleanup_complete = REG_WAIT_VERIFY_VAL(PFDEV(pdev), BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up), 1, wait_ms);
3298     DbgMessage(pdev, FATAL, "%d*%dms waiting for final cleanup compete\n", pdev->flr_stats.final_cleanup_complete, DEFAULT_WAIT_INTERVAL_MICSEC);
3299     /* Lets cleanup for next FLR final-cleanup... */
3300     LM_INTMEM_WRITE32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),0, BAR_CSTRORM_INTMEM);
3301 
3302 
3303 /*  5.  ATC cleanup. This process will include the following steps (note that ATC will not be available for phase2 of the
3304         integration and the following should be added only in phase3):
3305     a.  Optionally, wait 2 ms. This is not a must. The driver can start polling (next steps) immediately,
3306         but take into account that it may take time till the done indications will be set.
3307     b.  Wait until INVALIDATION_DONE[function] = 1
3308     c.  Write-clear INVALIDATION_DONE[function] */
3309 
3310 
3311 /*  6.  Verify PBF cleanup. Do the following for all PBF queues (queues 0,1,4, that will be indicated below with N):
3312     a.  Make sure PBF command-queue is flushed: Read pN_tq_occupancy. Let's say that the value is X.
3313         This number indicates the number of occupied transmission-queue lines.
3314         Poll on pN_tq_occupancy and pN_tq_lines_freed_cnt until one of the following:
3315             i.  pN_tq_occupancy is 0 (queue is empty). OR
3316             ii. pN_tq_lines_freed_cnt equals has advanced (cyclically) by X (all lines that were in the queue were processed). */
3317 
3318     for (idx = 0; idx < 3; idx++) {
3319         u32_t tq_to_free;
3320         u32_t tq_freed_cnt_start;
3321         u32_t tq_occ;
3322         u32_t tq_freed_cnt_last;
3323         u32_t pbf_reg_pN_tq_occupancy = 0;
3324         u32_t pbf_reg_pN_tq_lines_freed_cnt = 0;
3325 
3326         switch (idx) {
3327         case 0:
3328             pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev))? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY;
3329             pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT;
3330             break;
3331         case 1:
3332             pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY;
3333             pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT;
3334             break;
3335         case 2:
3336             pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY;
3337             pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT;
3338             break;
3339         }
3340         pdev->flr_stats.pbf_queue[idx] = 0;
3341         tq_freed_cnt_last = tq_freed_cnt_start = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_lines_freed_cnt);
3342         tq_occ = tq_to_free = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_occupancy);
3343         DbgMessage(pdev, FATAL, "TQ_OCCUPANCY[%d]      : s:%x\n", (idx == 2) ? 4 : idx, tq_to_free);
3344         DbgMessage(pdev, FATAL, "TQ_LINES_FREED_CNT[%d]: s:%x\n", (idx == 2) ? 4 : idx, tq_freed_cnt_start);
3345         while(tq_occ && ((u32_t)S32_SUB(tq_freed_cnt_last, tq_freed_cnt_start) < tq_to_free)) {
3346             if (pdev->flr_stats.pbf_queue[idx]++ < wait_ms/DEFAULT_WAIT_INTERVAL_MICSEC) {
3347                 mm_wait(PFDEV(pdev), DEFAULT_WAIT_INTERVAL_MICSEC);
3348                 tq_occ = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_occupancy);
3349                 tq_freed_cnt_last = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_lines_freed_cnt);
3350             } else {
3351                 DbgMessage(pdev, FATAL, "TQ_OCCUPANCY[%d]      : c:%x\n", (idx == 2) ? 4 : idx, tq_occ);
3352                 DbgMessage(pdev, FATAL, "TQ_LINES_FREED_CNT[%d]: c:%x\n", (idx == 2) ? 4 : idx, tq_freed_cnt_last);
3353                 DbgBreak();
3354                 break;
3355             }
3356         }
3357         DbgMessage(pdev, FATAL, "%d*%dms waiting for PBF command queue[%d] is flushed\n",
3358                     pdev->flr_stats.pbf_queue[idx], DEFAULT_WAIT_INTERVAL_MICSEC, (idx == 2) ? 4 : idx);
3359     }
3360 
3361 /*  b.  Make sure PBF transmission buffer is flushed: read pN_init_crd once and keep it in variable Y.
3362         Read pN_credit and keep it in X. Poll on pN_credit and pN_internal_crd_freed until one of the following:
3363             i.  (Y - pN_credit) is 0 (transmission buffer is empty). OR
3364             ii. pN_internal_crd_freed_cnt has advanced (cyclically) by Y-X (all transmission buffer lines that were occupied were freed).*/
3365 
3366     for (idx = 0; idx < 3; idx++) {
3367         u32_t init_crd;
3368         u32_t credit_last,credit_start;
3369         u32_t inernal_freed_crd_start;
3370         u32_t inernal_freed_crd_last = 0;
3371         u32_t pbf_reg_pN_init_crd = 0;
3372         u32_t pbf_reg_pN_credit = 0;
3373         u32_t pbf_reg_pN_internal_crd_freed = 0;
3374         switch (idx) {
3375         case 0:
3376             pbf_reg_pN_init_crd = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD;
3377             pbf_reg_pN_credit = (CHIP_IS_E3B0(pdev)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT;
3378             pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT;
3379             break;
3380         case 1:
3381             pbf_reg_pN_init_crd = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INIT_CRD_Q1: PBF_REG_P1_INIT_CRD;
3382             pbf_reg_pN_credit = (CHIP_IS_E3B0(pdev)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT;
3383             pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT;
3384             break;
3385         case 2:
3386             pbf_reg_pN_init_crd = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD;
3387             pbf_reg_pN_credit = (CHIP_IS_E3B0(pdev)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT;
3388             pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT;
3389             break;
3390         }
3391         pdev->flr_stats.pbf_transmit_buffer[idx] = 0;
3392         inernal_freed_crd_last = inernal_freed_crd_start = REG_RD(PFDEV(pdev), pbf_reg_pN_internal_crd_freed);
3393         credit_last = credit_start = REG_RD(PFDEV(pdev), pbf_reg_pN_credit);
3394         init_crd = REG_RD(PFDEV(pdev), pbf_reg_pN_init_crd);
3395         DbgMessage(pdev, FATAL, "INIT CREDIT[%d]       : %x\n", (idx == 2) ? 4 : idx, init_crd);
3396         DbgMessage(pdev, FATAL, "CREDIT[%d]            : s:%x\n", (idx == 2) ? 4 : idx, credit_start);
3397         DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: s:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_start);
3398         while ((credit_last != init_crd)
3399                && (u32_t)S32_SUB(inernal_freed_crd_last, inernal_freed_crd_start) < (init_crd - credit_start)) {
3400             if (pdev->flr_stats.pbf_transmit_buffer[idx]++ < wait_ms/DEFAULT_WAIT_INTERVAL_MICSEC) {
3401                 mm_wait(PFDEV(pdev), DEFAULT_WAIT_INTERVAL_MICSEC);
3402                 credit_last = REG_RD(PFDEV(pdev), pbf_reg_pN_credit);
3403                 inernal_freed_crd_last = REG_RD(PFDEV(pdev), pbf_reg_pN_internal_crd_freed);
3404             } else {
3405                 DbgMessage(pdev, FATAL, "CREDIT[%d]            : c:%x\n", (idx == 2) ? 4 : idx, credit_last);
3406                 DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: c:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_last);
3407                 DbgBreak();
3408                 break;
3409             }
3410         }
3411         DbgMessage(pdev, FATAL, "%d*%dms waiting for PBF transmission buffer[%d] is flushed\n",
3412                     pdev->flr_stats.pbf_transmit_buffer[idx], DEFAULT_WAIT_INTERVAL_MICSEC, (idx == 2) ? 4 : idx);
3413     }
3414 
3415 /*  7.  Wait for 100ms in order to make sure that the chip is clean, including all PCI related paths
3416         (in Emulation the driver can wait for 10ms*EmulationFactor, i.e.: 20s). This is especially required if FW doesn't implement
3417         the flows in Optional Operations (future enhancements).) */
3418     mm_wait(pdev, 10000*factor);
3419 
3420 /*  9.  Initialize the function as usual this should include also re-enabling the function in all the HW blocks and Storms that
3421     were disabled by the MCP and cleaning relevant per-function information in the chip (internal RAM related information, IGU memory etc.).
3422         a.  In case of VF, PF resources that were allocated for previous VF can be re-used by the new VF. If there are resources
3423             that are not needed by the new VF then they should be cleared.
3424         b.  Note that as long as slow-path prod/cons update to Xstorm is not atomic, they must be cleared by the driver before setting
3425             the function to "enable" in the Xstorm.
3426         c.  Don't forget to enable the VF in the PXP or the DMA operation for PF in the PXP. */
3427 
3428 
3429     if (IS_VFDEV(pdev))
3430     {
3431 #ifdef VF_INVOLVED
3432         lm_set_con_state(pdev, LM_VF_Q_ID_TO_PF_CID(pdev, vf_info,0), LM_CON_STATE_CLOSE);
3433 #endif
3434     }
3435 
3436     vf_info->was_flred = FALSE;
3437 
3438     return lm_status;
3439 }
3440 
lm_pf_fl_vf_reset_set_inprogress(struct _lm_device_t * pdev,u8_t abs_vf_id)3441 void lm_pf_fl_vf_reset_set_inprogress(struct _lm_device_t * pdev, u8_t abs_vf_id)
3442 {
3443     lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3444 
3445     DbgMessage(pdev, WARN, "lm_pf_fl_vf_reset_set_inprogress(%d)\n",abs_vf_id);
3446     if (!vf_info) {
3447         DbgBreakMsg("lm_pf_fl_vf_reset_set_inprogress: vf_info is not found\n");
3448         return;
3449     } else {
3450         vf_info->was_flred = TRUE;
3451     }
3452 }
3453 
lm_pf_fl_vf_reset_clear_inprogress(struct _lm_device_t * pdev,u8_t abs_vf_id)3454 void lm_pf_fl_vf_reset_clear_inprogress(struct _lm_device_t *pdev, u8_t abs_vf_id)
3455 {
3456     lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3457 
3458     DbgMessage(pdev, WARN, "lm_pf_fl_vf_reset_clear_inprogress(%d)\n",abs_vf_id);
3459     if (!vf_info) {
3460         DbgBreakMsg("lm_pf_fl_vf_reset_clear_inprogress: vf_info is not found\n");
3461         return;
3462     } else {
3463         vf_info->was_flred = FALSE;
3464     }
3465 }
3466 
lm_pf_fl_vf_reset_is_inprogress(struct _lm_device_t * pdev,u8_t abs_vf_id)3467 u8_t lm_pf_fl_vf_reset_is_inprogress(struct _lm_device_t *pdev, u8_t abs_vf_id)
3468 {
3469     lm_vf_info_t * vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3470 
3471     DbgMessage(pdev, WARN, "lm_pf_fl_vf_reset_clear_inprogress(%d)\n",abs_vf_id);
3472     if (!vf_info) {
3473         DbgBreakMsg("lm_pf_fl_vf_reset_is_inprogress: vf_info is not found\n");
3474         return FALSE;
3475     } else {
3476         return vf_info->was_flred;
3477     }
3478 }
3479 
lm_pf_finally_release_vf(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)3480 lm_status_t lm_pf_finally_release_vf(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
3481 {
3482     lm_status_t lm_status = LM_STATUS_SUCCESS;
3483     u8_t function_fw_id;
3484     u8_t sb_idx;
3485     u8_t q_idx;
3486     u32_t cid;
3487 
3488     DbgBreakIf(!(pdev && vf_info));
3489     if (vf_info->vf_si_state == PF_SI_VF_INITIALIZED) {
3490         DbgMessage(pdev, WARN, "VF[%d%d)] is not closed yet\n", vf_info->relative_vf_id, vf_info->abs_vf_id);
3491         MM_ACQUIRE_VFS_STATS_LOCK(pdev);
3492         if (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_IN_PROCESSING) {
3493             vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
3494         }
3495         vf_info->vf_stats.stop_collect_stats = TRUE;
3496         vf_info->vf_stats.vf_stats_flag = 0;
3497         MM_RELEASE_VFS_STATS_LOCK(pdev);
3498 
3499         lm_status = lm_pf_vf_wait_for_stats_ready(pdev, vf_info);
3500         if (lm_status != LM_STATUS_SUCCESS) {
3501             DbgBreak();
3502         } else {
3503             vf_info->vf_stats.vf_stats_state = VF_STATS_NONE;
3504         }
3505 
3506         for (q_idx = 0; q_idx < vf_info->vf_si_num_of_active_q; q_idx++) {
3507             cid = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_idx);
3508             if (vf_info->was_malicious || vf_info->was_flred)
3509             {
3510                 lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
3511             }
3512             else
3513             {
3514                 lm_status = lm_close_eth_con(pdev, cid, TRUE);
3515             }
3516         }
3517         vf_info->vf_si_num_of_active_q = 0;
3518 
3519 //        if (!(vf_info->was_malicious || vf_info->was_flred))
3520         {
3521             lm_pf_disable_vf_igu_int(pdev, vf_info->abs_vf_id);
3522             /*
3523             Disable the function in STORMs
3524             */
3525             function_fw_id = 8 + vf_info->abs_vf_id;
3526 
3527             LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_XSTRORM_INTMEM);
3528             LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_CSTRORM_INTMEM);
3529             LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_TSTRORM_INTMEM);
3530             LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_FUNC_EN_OFFSET(function_fw_id), 0, BAR_USTRORM_INTMEM);
3531 
3532             for (sb_idx = 0; sb_idx < vf_info->num_sbs; sb_idx++) {
3533                 lm_clear_non_def_status_block(pdev,  LM_FW_VF_SB_ID(vf_info, sb_idx));
3534             }
3535 
3536             for (q_idx = 0; q_idx < vf_info->num_rxqs; q_idx++) {
3537                 u32_t reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + LM_FW_VF_QZONE_ID(vf_info,q_idx) * 4;
3538                 u32_t val = 0;
3539                 REG_WR(PFDEV(pdev), reg, val);
3540             }
3541         }
3542         vf_info->vf_si_state = PF_SI_ACQUIRED;
3543     }
3544 
3545     if (vf_info->vf_si_state == PF_SI_ACQUIRED) {
3546         DbgMessage(pdev, WARN, "VF[%d%d)] is not released yet\n", vf_info->relative_vf_id, vf_info->abs_vf_id);
3547         vf_info->vf_si_state = PF_SI_WAIT_FOR_ACQUIRING_REQUEST;
3548     }
3549     return lm_status;
3550 }
3551 
lm_pf_tpa_send_vf_ramrod(struct _lm_device_t * pdev,lm_vf_info_t * vf_info,u32_t q_idx,u8_t update_ipv4,u8_t update_ipv6)3552 lm_status_t lm_pf_tpa_send_vf_ramrod(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u32_t q_idx, u8_t update_ipv4, u8_t update_ipv6)
3553 {
3554     // Add ramrod send code
3555     lm_vf_chain_info_t*     tpa_chain = &vf_info->vf_chains[q_idx];
3556     lm_status_t             lm_status = LM_STATUS_SUCCESS;
3557     lm_address_t            q_addr;
3558     u32_t                   vf_cid_of_pf = 0;
3559     u16_t                   type = 0;
3560 
3561     if((CHK_NULL(tpa_chain->tpa_ramrod_data_virt)))
3562     {
3563         DbgBreakMsg("lm_tpa_send_ramrod : invalid paramters");
3564         return LM_STATUS_FAILURE;
3565     }
3566 
3567     tpa_chain->tpa_ramrod_data_virt->update_ipv4 = update_ipv4;
3568     tpa_chain->tpa_ramrod_data_virt->update_ipv6 = update_ipv6;
3569 
3570     tpa_chain->tpa_ramrod_data_virt->client_id     = LM_FW_VF_CLI_ID(vf_info, q_idx);
3571     /* maximal TPA queues allowed for this client */
3572     tpa_chain->tpa_ramrod_data_virt->max_tpa_queues        = LM_TPA_MAX_AGGS;
3573     /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */
3574     tpa_chain->tpa_ramrod_data_virt->max_sges_for_packet   = DIV_ROUND_UP_BITS(tpa_chain->mtu, LM_TPA_PAGE_BITS);
3575     /* Size of the buffers pointed by SGEs */
3576     ASSERT_STATIC(LM_TPA_PAGE_SIZE < MAX_VARIABLE_VALUE(tpa_chain->tpa_ramrod_data_virt->sge_buff_size));
3577     tpa_chain->tpa_ramrod_data_virt->sge_buff_size         = mm_cpu_to_le16(LM_TPA_PAGE_SIZE);
3578     /* maximal size for the aggregated TPA packets, reprted by the host */
3579     ASSERT_STATIC((LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE) < MAX_VARIABLE_VALUE(tpa_chain->tpa_ramrod_data_virt->max_agg_size));
3580     tpa_chain->tpa_ramrod_data_virt->max_agg_size          = mm_cpu_to_le16(LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE);
3581 
3582     q_addr.as_u64 = tpa_chain->sge_addr;
3583     //u32_t sge_page_base_lo /* The address to fetch the next sges from (low) */;
3584     tpa_chain->tpa_ramrod_data_virt->sge_page_base_lo      = mm_cpu_to_le32(q_addr.as_u32.low);
3585     //u32_t sge_page_base_hi /* The address to fetch the next sges from (high) */;
3586     tpa_chain->tpa_ramrod_data_virt->sge_page_base_hi      = mm_cpu_to_le32(q_addr.as_u32.high);
3587     //u16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
3588     tpa_chain->tpa_ramrod_data_virt->sge_pause_thr_low     = mm_cpu_to_le16(LM_TPA_SGE_PAUSE_THR_LOW);
3589     //u16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
3590     tpa_chain->tpa_ramrod_data_virt->sge_pause_thr_high    = mm_cpu_to_le16(LM_TPA_SGE_PAUSE_THR_HIGH);
3591 
3592     vf_cid_of_pf = LM_VF_Q_ID_TO_PF_CID(pdev, vf_info, q_idx);
3593     type = (ETH_CONNECTION_TYPE | ((8 + vf_info->abs_vf_id) << SPE_HDR_T_FUNCTION_ID_SHIFT));
3594 
3595     tpa_chain->tpa_ramrod_data_virt->complete_on_both_clients = TRUE;
3596 
3597     lm_status = lm_sq_post(pdev,
3598                            vf_cid_of_pf,
3599                            RAMROD_CMD_ID_ETH_TPA_UPDATE,
3600                            CMD_PRIORITY_MEDIUM,
3601                            type,
3602                            *(u64_t *)&(tpa_chain->tpa_ramrod_data_phys));
3603 
3604     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
3605 
3606     return lm_status;
3607 }
3608 
lm_is_vf_rsc_supported(struct _lm_device_t * pdev)3609 u8_t lm_is_vf_rsc_supported(struct _lm_device_t *pdev)
3610 {
3611     u8_t is_rsc_supported = TRUE;
3612     if (IS_VFDEV(pdev)) {
3613         if (IS_SW_CHANNEL_VIRT_MODE(pdev))
3614         {
3615             struct pf_vf_msg_acquire_resp * presp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
3616             if (!(presp->pfdev_info.pf_cap & PFVF_CAP_TPA)) {
3617                 is_rsc_supported = FALSE;
3618             }
3619         }
3620         else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
3621         {
3622             struct pfvf_acquire_resp_tlv * presp;
3623             presp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;
3624             if (!(presp->pfdev_info.pf_cap & PFVF_CAP_TPA_UPDATE)) {
3625                 is_rsc_supported = FALSE;
3626             }
3627         }
3628         else
3629         {
3630             DbgBreak();
3631         }
3632     }
3633     return is_rsc_supported;
3634 }
3635 
lm_pf_init_vf_filters(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)3636 void lm_pf_init_vf_filters(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
3637 {
3638     if ((vf_info == NULL) || (pdev == NULL))
3639     {
3640         DbgBreakMsg("lm_pf_init_vf_filters : invalid paramters");
3641     }
3642     else
3643     {
3644         vf_info->is_promiscuous_mode_restricted = (pdev->params.vf_promiscuous_mode_restricted != 0);
3645     }
3646     return;
3647 }
3648 
lm_pf_allow_vf_promiscuous_mode(lm_vf_info_t * vf_info,u8_t is_allowed)3649 void lm_pf_allow_vf_promiscuous_mode(lm_vf_info_t *vf_info, u8_t is_allowed)
3650 {
3651     if (vf_info == NULL)
3652     {
3653         DbgBreakMsg("lm_pf_allow_vf_promiscuous_mode : invalid paramters");
3654     }
3655     else
3656     {
3657         vf_info->is_promiscuous_mode_restricted = !is_allowed;
3658     }
3659     return;
3660 }
3661 
lm_pf_int_vf_igu_sb_cleanup(lm_device_t * pdev,lm_vf_info_t * vf_info,u8_t vf_chain_id)3662 void lm_pf_int_vf_igu_sb_cleanup(lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t vf_chain_id)
3663 {
3664     struct igu_regular  cmd_data = {0};
3665     struct igu_ctrl_reg cmd_ctrl = {0};
3666     u32_t igu_addr_ack           = 0;
3667     u32_t sb_bit                 = 0;
3668     u32_t cnt                    = 100;
3669     u8_t  igu_sb_id              = 0;
3670 #ifdef _VBD_CMD_
3671     return;
3672 #endif
3673 
3674     /* Not supported in backward compatible mode! */
3675     if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC)
3676     {
3677         return;
3678     }
3679 
3680     if ((vf_info == NULL) || (pdev == NULL))
3681     {
3682         DbgBreakMsg("lm_pf_int_vf_igu_sb_cleanup : invalid paramters");
3683         return;
3684     }
3685 
3686     if (IS_VFDEV(pdev))
3687     {
3688         DbgBreakMsg("lm_pf_int_vf_igu_sb_cleanup : only available on Host/PF side");
3689         return;
3690     }
3691 
3692     igu_sb_id = LM_VF_IGU_SB_ID(vf_info,vf_chain_id);
3693     igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (igu_sb_id/32)*4;
3694     sb_bit =  1 << (igu_sb_id%32);
3695 
3696     /* Cleanup can be done only via GRC access using the producer update command */
3697     cmd_data.sb_id_and_flags =
3698         ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
3699           IGU_REGULAR_CLEANUP_SET |
3700           IGU_REGULAR_BCLEANUP);
3701 
3702     cmd_ctrl.ctrl_data =
3703         (((IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id) << IGU_CTRL_REG_ADDRESS_SHIFT) |
3704          (vf_info->abs_vf_id << IGU_CTRL_REG_FID_SHIFT) |
3705          (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
3706 
3707     REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, cmd_data.sb_id_and_flags);
3708     REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);
3709 
3710     /* wait for clean up to finish */
3711     while (!(REG_RD(pdev, igu_addr_ack) & sb_bit) && --cnt)
3712     {
3713         mm_wait(pdev, 10);
3714     }
3715 
3716     if (!(REG_RD(pdev, igu_addr_ack) & sb_bit))
3717     {
3718         DbgMessage(pdev, FATAL, "Unable to finish IGU cleanup - set: igu_sb_id %d offset %d bit %d (cnt %d)\n",
3719                     igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
3720     }
3721 
3722     /* Now we clear the cleanup-bit... same command without cleanup_set... */
3723     cmd_data.sb_id_and_flags =
3724         ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
3725           IGU_REGULAR_BCLEANUP);
3726 
3727 
3728     REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, cmd_data.sb_id_and_flags);
3729     REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);
3730 
3731     /* wait for clean up to finish */
3732     while ((REG_RD(pdev, igu_addr_ack) & sb_bit) && --cnt)
3733     {
3734         mm_wait(pdev, 10);
3735     }
3736 
3737     if ((REG_RD(pdev, igu_addr_ack) & sb_bit))
3738     {
3739         DbgMessage(pdev, FATAL, "Unable to finish IGU cleanup - clear: igu_sb_id %d offset %d bit %d (cnt %d)\n",
3740                     igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
3741     }
3742 }
3743 
3744 #endif
3745 /* */
3746