xref: /linux/drivers/net/ethernet/brocade/bna/bna_tx_rx.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12   */
13 /*
14  * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include "bna.h"
19 #include "bfi.h"
20 
21 /* IB */
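/* Update the IB coalescing timeout and the doorbell ACK value derived from it */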
22 static void
23 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
24 {
25 	ib->coalescing_timeo = coalescing_timeo;
26 	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
27 				(u32)ib->coalescing_timeo, 0);
28 }
29 
30 /* RXF */
31 
32 #define bna_rxf_vlan_cfg_soft_reset(rxf)				\
33 do {									\
34 	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
35 	(rxf)->vlan_strip_pending = true;				\
36 } while (0)
37 
38 #define bna_rxf_rss_cfg_soft_reset(rxf)					\
39 do {									\
40 	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
41 		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
42 				BNA_RSS_F_CFG_PENDING |			\
43 				BNA_RSS_F_STATUS_PENDING);		\
44 } while (0)
45 
46 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
47 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
48 static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54 					enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56 					enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58 					enum bna_cleanup_type cleanup);
59 
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61 			enum bna_rxf_event);
62 bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
63 			enum bna_rxf_event);
64 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
65 			enum bna_rxf_event);
66 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
67 			enum bna_rxf_event);
68 bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
69 			enum bna_rxf_event);
70 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
71 			enum bna_rxf_event);
72 
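/*
 * RXF state machine. Configuration changes are applied one firmware
 * request at a time in cfg_wait; paused, fltr_clr_wait and
 * last_resp_wait handle pause/stop while a request is outstanding.
 */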
73 static void
74 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
75 {
76 	call_rxf_stop_cbfn(rxf);
77 }
78 
79 static void
80 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
81 {
82 	switch (event) {
83 	case RXF_E_START:
84 		if (rxf->flags & BNA_RXF_F_PAUSED) {
85 			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
86 			call_rxf_start_cbfn(rxf);
87 		} else
88 			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
89 		break;
90 
91 	case RXF_E_STOP:
92 		call_rxf_stop_cbfn(rxf);
93 		break;
94 
95 	case RXF_E_FAIL:
96 		/* No-op */
97 		break;
98 
99 	case RXF_E_CONFIG:
100 		call_rxf_cam_fltr_cbfn(rxf);
101 		break;
102 
103 	case RXF_E_PAUSE:
104 		rxf->flags |= BNA_RXF_F_PAUSED;
105 		call_rxf_pause_cbfn(rxf);
106 		break;
107 
108 	case RXF_E_RESUME:
109 		rxf->flags &= ~BNA_RXF_F_PAUSED;
110 		call_rxf_resume_cbfn(rxf);
111 		break;
112 
113 	default:
114 		bfa_sm_fault(event);
115 	}
116 }
117 
118 static void
119 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
120 {
121 	call_rxf_pause_cbfn(rxf);
122 }
123 
124 static void
125 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
126 {
127 	switch (event) {
128 	case RXF_E_STOP:
129 	case RXF_E_FAIL:
130 		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
131 		break;
132 
133 	case RXF_E_CONFIG:
134 		call_rxf_cam_fltr_cbfn(rxf);
135 		break;
136 
137 	case RXF_E_RESUME:
138 		rxf->flags &= ~BNA_RXF_F_PAUSED;
139 		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
140 		break;
141 
142 	default:
143 		bfa_sm_fault(event);
144 	}
145 }
146 
147 static void
148 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
149 {
150 	if (!bna_rxf_cfg_apply(rxf)) {
151 		/* No more pending config updates */
152 		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
153 	}
154 }
155 
156 static void
157 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
158 {
159 	switch (event) {
160 	case RXF_E_STOP:
161 		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
162 		break;
163 
164 	case RXF_E_FAIL:
165 		bna_rxf_cfg_reset(rxf);
166 		call_rxf_start_cbfn(rxf);
167 		call_rxf_cam_fltr_cbfn(rxf);
168 		call_rxf_resume_cbfn(rxf);
169 		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
170 		break;
171 
172 	case RXF_E_CONFIG:
173 		/* No-op */
174 		break;
175 
176 	case RXF_E_PAUSE:
177 		rxf->flags |= BNA_RXF_F_PAUSED;
178 		call_rxf_start_cbfn(rxf);
179 		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
180 		break;
181 
182 	case RXF_E_FW_RESP:
183 		if (!bna_rxf_cfg_apply(rxf)) {
184 			/* No more pending config updates */
185 			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
186 		}
187 		break;
188 
189 	default:
190 		bfa_sm_fault(event);
191 	}
192 }
193 
194 static void
195 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
196 {
197 	call_rxf_start_cbfn(rxf);
198 	call_rxf_cam_fltr_cbfn(rxf);
199 	call_rxf_resume_cbfn(rxf);
200 }
201 
202 static void
203 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
204 {
205 	switch (event) {
206 	case RXF_E_STOP:
207 	case RXF_E_FAIL:
208 		bna_rxf_cfg_reset(rxf);
209 		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
210 		break;
211 
212 	case RXF_E_CONFIG:
213 		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
214 		break;
215 
216 	case RXF_E_PAUSE:
217 		rxf->flags |= BNA_RXF_F_PAUSED;
218 		if (!bna_rxf_fltr_clear(rxf))
219 			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
220 		else
221 			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
222 		break;
223 
224 	default:
225 		bfa_sm_fault(event);
226 	}
227 }
228 
229 static void
230 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
231 {
232 }
233 
234 static void
235 bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
236 {
237 	switch (event) {
238 	case RXF_E_FAIL:
239 		bna_rxf_cfg_reset(rxf);
240 		call_rxf_pause_cbfn(rxf);
241 		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
242 		break;
243 
244 	case RXF_E_FW_RESP:
245 		if (!bna_rxf_fltr_clear(rxf)) {
246 			/* No more pending CAM entries to clear */
247 			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
248 		}
249 		break;
250 
251 	default:
252 		bfa_sm_fault(event);
253 	}
254 }
255 
256 static void
257 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
258 {
259 }
260 
261 static void
262 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
263 {
264 	switch (event) {
265 	case RXF_E_FAIL:
266 	case RXF_E_FW_RESP:
267 		bna_rxf_cfg_reset(rxf);
268 		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
269 		break;
270 
271 	default:
272 		bfa_sm_fault(event);
273 	}
274 }
275 
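/* Build a unicast MAC request of the given type and post it to the firmware message queue */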
276 static void
277 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
278 		enum bfi_enet_h2i_msgs req_type)
279 {
280 	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
281 
282 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
283 	req->mh.num_entries = htons(
284 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
285 	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
286 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
287 		sizeof(struct bfi_enet_ucast_req), &req->mh);
288 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
289 }
290 
291 static void
292 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
293 {
294 	struct bfi_enet_mcast_add_req *req =
295 		&rxf->bfi_enet_cmd.mcast_add_req;
296 
297 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
298 		0, rxf->rx->rid);
299 	req->mh.num_entries = htons(
300 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
301 	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
302 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
303 		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
304 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
305 }
306 
307 static void
308 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
309 {
310 	struct bfi_enet_mcast_del_req *req =
311 		&rxf->bfi_enet_cmd.mcast_del_req;
312 
313 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
314 		0, rxf->rx->rid);
315 	req->mh.num_entries = htons(
316 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
317 	req->handle = htons(handle);
318 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
319 		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
320 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
321 }
322 
323 static void
324 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
325 {
326 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
327 
328 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
329 		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
330 	req->mh.num_entries = htons(
331 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
332 	req->enable = status;
333 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
334 		sizeof(struct bfi_enet_enable_req), &req->mh);
335 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
336 }
337 
338 static void
339 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
340 {
341 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
342 
343 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
344 		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
345 	req->mh.num_entries = htons(
346 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
347 	req->enable = status;
348 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
349 		sizeof(struct bfi_enet_enable_req), &req->mh);
350 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
351 }
352 
353 static void
354 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
355 {
356 	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
357 	int i;
358 	int j;
359 
360 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
361 		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
362 	req->mh.num_entries = htons(
363 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
364 	req->block_idx = block_idx;
365 	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
366 		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
367 		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
368 			req->bit_mask[i] =
369 				htonl(rxf->vlan_filter_table[j]);
370 		else
371 			req->bit_mask[i] = 0xFFFFFFFF;
372 	}
373 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
374 		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
375 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
376 }
377 
378 static void
379 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
380 {
381 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
382 
383 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
384 		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
385 	req->mh.num_entries = htons(
386 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
387 	req->enable = rxf->vlan_strip_status;
388 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
389 		sizeof(struct bfi_enet_enable_req), &req->mh);
390 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
391 }
392 
393 static void
394 bna_bfi_rit_cfg(struct bna_rxf *rxf)
395 {
396 	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
397 
398 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
399 		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
400 	req->mh.num_entries = htons(
401 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
402 	req->size = htons(rxf->rit_size);
403 	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
404 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
405 		sizeof(struct bfi_enet_rit_req), &req->mh);
406 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
407 }
408 
409 static void
410 bna_bfi_rss_cfg(struct bna_rxf *rxf)
411 {
412 	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
413 	int i;
414 
415 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
416 		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
417 	req->mh.num_entries = htons(
418 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
419 	req->cfg.type = rxf->rss_cfg.hash_type;
420 	req->cfg.mask = rxf->rss_cfg.hash_mask;
421 	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
422 		req->cfg.key[i] =
423 			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
424 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
425 		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
426 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
427 }
428 
429 static void
430 bna_bfi_rss_enable(struct bna_rxf *rxf)
431 {
432 	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
433 
434 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
435 		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
436 	req->mh.num_entries = htons(
437 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
438 	req->enable = rxf->rss_status;
439 	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
440 		sizeof(struct bfi_enet_enable_req), &req->mh);
441 	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
442 }
443 
444 /* This function gets the multicast MAC that has already been added to CAM */
445 static struct bna_mac *
446 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
447 {
448 	struct bna_mac *mac;
449 	struct list_head *qe;
450 
451 	list_for_each(qe, &rxf->mcast_active_q) {
452 		mac = (struct bna_mac *)qe;
453 		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
454 			return mac;
455 	}
456 
457 	list_for_each(qe, &rxf->mcast_pending_del_q) {
458 		mac = (struct bna_mac *)qe;
459 		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
460 			return mac;
461 	}
462 
463 	return NULL;
464 }
465 
466 static struct bna_mcam_handle *
467 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
468 {
469 	struct bna_mcam_handle *mchandle;
470 	struct list_head *qe;
471 
472 	list_for_each(qe, &rxf->mcast_handle_q) {
473 		mchandle = (struct bna_mcam_handle *)qe;
474 		if (mchandle->handle == handle)
475 			return mchandle;
476 	}
477 
478 	return NULL;
479 }
480 
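/* Bind the firmware multicast handle to the MAC, allocating a refcounted handle entry on first use */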
481 static void
482 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
483 {
484 	struct bna_mac *mcmac;
485 	struct bna_mcam_handle *mchandle;
486 
487 	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
488 	mchandle = bna_rxf_mchandle_get(rxf, handle);
489 	if (mchandle == NULL) {
490 		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
491 		mchandle->handle = handle;
492 		mchandle->refcnt = 0;
493 		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
494 	}
495 	mchandle->refcnt++;
496 	mcmac->handle = mchandle;
497 }
498 
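/* Release the MAC's multicast handle; on the last reference a delete request is posted for hard cleanup (returns 1 if posted) */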
499 static int
500 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
501 		enum bna_cleanup_type cleanup)
502 {
503 	struct bna_mcam_handle *mchandle;
504 	int ret = 0;
505 
506 	mchandle = mac->handle;
507 	if (mchandle == NULL)
508 		return ret;
509 
510 	mchandle->refcnt--;
511 	if (mchandle->refcnt == 0) {
512 		if (cleanup == BNA_HARD_CLEANUP) {
513 			bna_bfi_mcast_del_req(rxf, mchandle->handle);
514 			ret = 1;
515 		}
516 		list_del(&mchandle->qe);
517 		bfa_q_qe_init(&mchandle->qe);
518 		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
519 	}
520 	mac->handle = NULL;
521 
522 	return ret;
523 }
524 
525 static int
526 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
527 {
528 	struct bna_mac *mac = NULL;
529 	struct list_head *qe;
530 	int ret;
531 
532 	/* Delete multicast entries previously added */
533 	while (!list_empty(&rxf->mcast_pending_del_q)) {
534 		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
535 		bfa_q_qe_init(qe);
536 		mac = (struct bna_mac *)qe;
537 		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
538 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
539 		if (ret)
540 			return ret;
541 	}
542 
543 	/* Add multicast entries */
544 	if (!list_empty(&rxf->mcast_pending_add_q)) {
545 		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
546 		bfa_q_qe_init(qe);
547 		mac = (struct bna_mac *)qe;
548 		list_add_tail(&mac->qe, &rxf->mcast_active_q);
549 		bna_bfi_mcast_add_req(rxf, mac);
550 		return 1;
551 	}
552 
553 	return 0;
554 }
555 
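/* Program the lowest-numbered pending VLAN filter block; one block per call */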
556 static int
557 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
558 {
559 	u8 vlan_pending_bitmask;
560 	int block_idx = 0;
561 
562 	if (rxf->vlan_pending_bitmask) {
563 		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
564 		while (!(vlan_pending_bitmask & 0x1)) {
565 			block_idx++;
566 			vlan_pending_bitmask >>= 1;
567 		}
568 		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
569 		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
570 		return 1;
571 	}
572 
573 	return 0;
574 }
575 
576 static int
577 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
578 {
579 	struct list_head *qe;
580 	struct bna_mac *mac;
581 	int ret;
582 
583 	/* Throw away delete pending mcast entries */
584 	while (!list_empty(&rxf->mcast_pending_del_q)) {
585 		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
586 		bfa_q_qe_init(qe);
587 		mac = (struct bna_mac *)qe;
588 		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
589 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
590 		if (ret)
591 			return ret;
592 	}
593 
594 	/* Move active mcast entries to pending_add_q */
595 	while (!list_empty(&rxf->mcast_active_q)) {
596 		bfa_q_deq(&rxf->mcast_active_q, &qe);
597 		bfa_q_qe_init(qe);
598 		list_add_tail(qe, &rxf->mcast_pending_add_q);
599 		mac = (struct bna_mac *)qe;
600 		if (bna_rxf_mcast_del(rxf, mac, cleanup))
601 			return 1;
602 	}
603 
604 	return 0;
605 }
606 
607 static int
608 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
609 {
610 	if (rxf->rss_pending) {
611 		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
612 			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
613 			bna_bfi_rit_cfg(rxf);
614 			return 1;
615 		}
616 
617 		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
618 			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
619 			bna_bfi_rss_cfg(rxf);
620 			return 1;
621 		}
622 
623 		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
624 			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
625 			bna_bfi_rss_enable(rxf);
626 			return 1;
627 		}
628 	}
629 
630 	return 0;
631 }
632 
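/* Apply the next pending configuration change; returns 1 if a firmware request was posted, 0 when nothing is pending */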
633 static int
634 bna_rxf_cfg_apply(struct bna_rxf *rxf)
635 {
636 	if (bna_rxf_ucast_cfg_apply(rxf))
637 		return 1;
638 
639 	if (bna_rxf_mcast_cfg_apply(rxf))
640 		return 1;
641 
642 	if (bna_rxf_promisc_cfg_apply(rxf))
643 		return 1;
644 
645 	if (bna_rxf_allmulti_cfg_apply(rxf))
646 		return 1;
647 
648 	if (bna_rxf_vlan_cfg_apply(rxf))
649 		return 1;
650 
651 	if (bna_rxf_vlan_strip_cfg_apply(rxf))
652 		return 1;
653 
654 	if (bna_rxf_rss_cfg_apply(rxf))
655 		return 1;
656 
657 	return 0;
658 }
659 
660 /* Hard cleanup: clear CAM filters and rx modes in firmware; returns 1 while requests are pending */
661 static int
662 bna_rxf_fltr_clear(struct bna_rxf *rxf)
663 {
664 	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
665 		return 1;
666 
667 	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
668 		return 1;
669 
670 	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
671 		return 1;
672 
673 	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
674 		return 1;
675 
676 	return 0;
677 }
678 
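/* Soft reset: move active configuration back to pending without posting firmware requests */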
679 static void
680 bna_rxf_cfg_reset(struct bna_rxf *rxf)
681 {
682 	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
683 	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684 	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685 	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686 	bna_rxf_vlan_cfg_soft_reset(rxf);
687 	bna_rxf_rss_cfg_soft_reset(rxf);
688 }
689 
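/* Fill the RSS indirection table with the CQ ids of the rx paths */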
690 static void
691 bna_rit_init(struct bna_rxf *rxf, int rit_size)
692 {
693 	struct bna_rx *rx = rxf->rx;
694 	struct bna_rxp *rxp;
695 	struct list_head *qe;
696 	int offset = 0;
697 
698 	rxf->rit_size = rit_size;
699 	list_for_each(qe, &rx->rxp_q) {
700 		rxp = (struct bna_rxp *)qe;
701 		rxf->rit[offset] = rxp->cq.ccb->id;
702 		offset++;
703 	}
705 }
706 
707 void
708 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
709 {
710 	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
711 }
712 
713 void
714 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
715 			struct bfi_msgq_mhdr *msghdr)
716 {
717 	struct bfi_enet_mcast_add_req *req =
718 		&rxf->bfi_enet_cmd.mcast_add_req;
719 	struct bfi_enet_mcast_add_rsp *rsp =
720 		(struct bfi_enet_mcast_add_rsp *)msghdr;
721 
722 	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
723 		ntohs(rsp->handle));
724 	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
725 }
726 
727 static void
728 bna_rxf_init(struct bna_rxf *rxf,
729 		struct bna_rx *rx,
730 		struct bna_rx_config *q_config,
731 		struct bna_res_info *res_info)
732 {
733 	rxf->rx = rx;
734 
735 	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
736 	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
737 	rxf->ucast_pending_set = 0;
738 	rxf->ucast_active_set = 0;
739 	INIT_LIST_HEAD(&rxf->ucast_active_q);
740 	rxf->ucast_pending_mac = NULL;
741 
742 	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
743 	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
744 	INIT_LIST_HEAD(&rxf->mcast_active_q);
745 	INIT_LIST_HEAD(&rxf->mcast_handle_q);
746 
747 	if (q_config->paused)
748 		rxf->flags |= BNA_RXF_F_PAUSED;
749 
750 	rxf->rit = (u8 *)
751 		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
752 	bna_rit_init(rxf, q_config->num_paths);
753 
754 	rxf->rss_status = q_config->rss_status;
755 	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
756 		rxf->rss_cfg = q_config->rss_config;
757 		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
758 		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
759 		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
760 	}
761 
762 	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
763 	memset(rxf->vlan_filter_table, 0,
764 			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
765 	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
766 	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
767 
768 	rxf->vlan_strip_status = q_config->vlan_strip_status;
769 
770 	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
771 }
772 
773 static void
774 bna_rxf_uninit(struct bna_rxf *rxf)
775 {
776 	struct bna_mac *mac;
777 
778 	rxf->ucast_pending_set = 0;
779 	rxf->ucast_active_set = 0;
780 
781 	while (!list_empty(&rxf->ucast_pending_add_q)) {
782 		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
783 		bfa_q_qe_init(&mac->qe);
784 		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
785 	}
786 
787 	if (rxf->ucast_pending_mac) {
788 		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
789 		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
790 			rxf->ucast_pending_mac);
791 		rxf->ucast_pending_mac = NULL;
792 	}
793 
794 	while (!list_empty(&rxf->mcast_pending_add_q)) {
795 		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
796 		bfa_q_qe_init(&mac->qe);
797 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
798 	}
799 
800 	rxf->rxmode_pending = 0;
801 	rxf->rxmode_pending_bitmask = 0;
802 	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
803 		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
804 	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
805 		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
806 
807 	rxf->rss_pending = 0;
808 	rxf->vlan_strip_pending = false;
809 
810 	rxf->flags = 0;
811 
812 	rxf->rx = NULL;
813 }
814 
815 static void
816 bna_rx_cb_rxf_started(struct bna_rx *rx)
817 {
818 	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
819 }
820 
821 static void
822 bna_rxf_start(struct bna_rxf *rxf)
823 {
824 	rxf->start_cbfn = bna_rx_cb_rxf_started;
825 	rxf->start_cbarg = rxf->rx;
826 	bfa_fsm_send_event(rxf, RXF_E_START);
827 }
828 
829 static void
830 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
831 {
832 	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
833 }
834 
835 static void
836 bna_rxf_stop(struct bna_rxf *rxf)
837 {
838 	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
839 	rxf->stop_cbarg = rxf->rx;
840 	bfa_fsm_send_event(rxf, RXF_E_STOP);
841 }
842 
843 static void
844 bna_rxf_fail(struct bna_rxf *rxf)
845 {
846 	bfa_fsm_send_event(rxf, RXF_E_FAIL);
847 }
848 
849 enum bna_cb_status
850 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
851 		 void (*cbfn)(struct bnad *, struct bna_rx *))
852 {
853 	struct bna_rxf *rxf = &rx->rxf;
854 
855 	if (rxf->ucast_pending_mac == NULL) {
856 		rxf->ucast_pending_mac =
857 				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
858 		if (rxf->ucast_pending_mac == NULL)
859 			return BNA_CB_UCAST_CAM_FULL;
860 		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
861 	}
862 
863 	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
864 	rxf->ucast_pending_set = 1;
865 	rxf->cam_fltr_cbfn = cbfn;
866 	rxf->cam_fltr_cbarg = rx->bna->bnad;
867 
868 	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
869 
870 	return BNA_CB_SUCCESS;
871 }
872 
873 enum bna_cb_status
874 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
875 		 void (*cbfn)(struct bnad *, struct bna_rx *))
876 {
877 	struct bna_rxf *rxf = &rx->rxf;
878 	struct bna_mac *mac;
879 
880 	/* Check if already added or pending addition */
881 	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
882 		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
883 		if (cbfn)
884 			cbfn(rx->bna->bnad, rx);
885 		return BNA_CB_SUCCESS;
886 	}
887 
888 	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
889 	if (mac == NULL)
890 		return BNA_CB_MCAST_LIST_FULL;
891 	bfa_q_qe_init(&mac->qe);
892 	memcpy(mac->addr, addr, ETH_ALEN);
893 	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
894 
895 	rxf->cam_fltr_cbfn = cbfn;
896 	rxf->cam_fltr_cbarg = rx->bna->bnad;
897 
898 	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
899 
900 	return BNA_CB_SUCCESS;
901 }
902 
903 enum bna_cb_status
904 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
905 		     void (*cbfn)(struct bnad *, struct bna_rx *))
906 {
907 	struct bna_rxf *rxf = &rx->rxf;
908 	struct list_head list_head;
909 	struct list_head *qe;
910 	u8 *mcaddr;
911 	struct bna_mac *mac;
912 	int i;
913 
914 	/* Allocate nodes */
915 	INIT_LIST_HEAD(&list_head);
916 	for (i = 0, mcaddr = mclist; i < count; i++) {
917 		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
918 		if (mac == NULL)
919 			goto err_return;
920 		bfa_q_qe_init(&mac->qe);
921 		memcpy(mac->addr, mcaddr, ETH_ALEN);
922 		list_add_tail(&mac->qe, &list_head);
923 
924 		mcaddr += ETH_ALEN;
925 	}
926 
927 	/* Purge the pending_add_q */
928 	while (!list_empty(&rxf->mcast_pending_add_q)) {
929 		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
930 		bfa_q_qe_init(qe);
931 		mac = (struct bna_mac *)qe;
932 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
933 	}
934 
935 	/* Schedule active_q entries for deletion */
936 	while (!list_empty(&rxf->mcast_active_q)) {
937 		bfa_q_deq(&rxf->mcast_active_q, &qe);
938 		mac = (struct bna_mac *)qe;
939 		bfa_q_qe_init(&mac->qe);
940 		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
941 	}
942 
943 	/* Add the new entries */
944 	while (!list_empty(&list_head)) {
945 		bfa_q_deq(&list_head, &qe);
946 		mac = (struct bna_mac *)qe;
947 		bfa_q_qe_init(&mac->qe);
948 		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
949 	}
950 
951 	rxf->cam_fltr_cbfn = cbfn;
952 	rxf->cam_fltr_cbarg = rx->bna->bnad;
953 	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
954 
955 	return BNA_CB_SUCCESS;
956 
957 err_return:
958 	while (!list_empty(&list_head)) {
959 		bfa_q_deq(&list_head, &qe);
960 		mac = (struct bna_mac *)qe;
961 		bfa_q_qe_init(&mac->qe);
962 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
963 	}
964 
965 	return BNA_CB_MCAST_LIST_FULL;
966 }
967 
968 void
969 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
970 {
971 	struct bna_rxf *rxf = &rx->rxf;
972 	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
973 	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
974 	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
975 
976 	rxf->vlan_filter_table[index] |= bit;
977 	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
978 		rxf->vlan_pending_bitmask |= (1 << group_id);
979 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
980 	}
981 }
982 
983 void
984 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
985 {
986 	struct bna_rxf *rxf = &rx->rxf;
987 	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
988 	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
989 	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
990 
991 	rxf->vlan_filter_table[index] &= ~bit;
992 	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
993 		rxf->vlan_pending_bitmask |= (1 << group_id);
994 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
995 	}
996 }
997 
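/* Apply one pending unicast change: delete, default MAC set, or add; returns 1 if a request was posted */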
998 static int
999 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1000 {
1001 	struct bna_mac *mac = NULL;
1002 	struct list_head *qe;
1003 
1004 	/* Delete MAC addresses previously added */
1005 	if (!list_empty(&rxf->ucast_pending_del_q)) {
1006 		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1007 		bfa_q_qe_init(qe);
1008 		mac = (struct bna_mac *)qe;
1009 		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1010 		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1011 		return 1;
1012 	}
1013 
1014 	/* Set default unicast MAC */
1015 	if (rxf->ucast_pending_set) {
1016 		rxf->ucast_pending_set = 0;
1017 		memcpy(rxf->ucast_active_mac.addr,
1018 			rxf->ucast_pending_mac->addr, ETH_ALEN);
1019 		rxf->ucast_active_set = 1;
1020 		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1021 			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1022 		return 1;
1023 	}
1024 
1025 	/* Add additional MAC entries */
1026 	if (!list_empty(&rxf->ucast_pending_add_q)) {
1027 		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1028 		bfa_q_qe_init(qe);
1029 		mac = (struct bna_mac *)qe;
1030 		list_add_tail(&mac->qe, &rxf->ucast_active_q);
1031 		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1032 		return 1;
1033 	}
1034 
1035 	return 0;
1036 }
1037 
1038 static int
1039 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1040 {
1041 	struct list_head *qe;
1042 	struct bna_mac *mac;
1043 
1044 	/* Throw away delete pending ucast entries */
1045 	while (!list_empty(&rxf->ucast_pending_del_q)) {
1046 		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1047 		bfa_q_qe_init(qe);
1048 		mac = (struct bna_mac *)qe;
1049 		if (cleanup == BNA_SOFT_CLEANUP)
1050 			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1051 		else {
1052 			bna_bfi_ucast_req(rxf, mac,
1053 				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1054 			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1055 			return 1;
1056 		}
1057 	}
1058 
1059 	/* Move active ucast entries to pending_add_q */
1060 	while (!list_empty(&rxf->ucast_active_q)) {
1061 		bfa_q_deq(&rxf->ucast_active_q, &qe);
1062 		bfa_q_qe_init(qe);
1063 		list_add_tail(qe, &rxf->ucast_pending_add_q);
1064 		if (cleanup == BNA_HARD_CLEANUP) {
1065 			mac = (struct bna_mac *)qe;
1066 			bna_bfi_ucast_req(rxf, mac,
1067 				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1068 			return 1;
1069 		}
1070 	}
1071 
1072 	if (rxf->ucast_active_set) {
1073 		rxf->ucast_pending_set = 1;
1074 		rxf->ucast_active_set = 0;
1075 		if (cleanup == BNA_HARD_CLEANUP) {
1076 			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1077 				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1078 			return 1;
1079 		}
1080 	}
1081 
1082 	return 0;
1083 }
1084 
1085 static int
1086 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1087 {
1088 	struct bna *bna = rxf->rx->bna;
1089 
1090 	/* Enable/disable promiscuous mode */
1091 	if (is_promisc_enable(rxf->rxmode_pending,
1092 				rxf->rxmode_pending_bitmask)) {
1093 		/* move promisc configuration from pending -> active */
1094 		promisc_inactive(rxf->rxmode_pending,
1095 				rxf->rxmode_pending_bitmask);
1096 		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1097 		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1098 		return 1;
1099 	} else if (is_promisc_disable(rxf->rxmode_pending,
1100 				rxf->rxmode_pending_bitmask)) {
1101 		/* move promisc configuration from pending -> active */
1102 		promisc_inactive(rxf->rxmode_pending,
1103 				rxf->rxmode_pending_bitmask);
1104 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1105 		bna->promisc_rid = BFI_INVALID_RID;
1106 		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1107 		return 1;
1108 	}
1109 
1110 	return 0;
1111 }
1112 
1113 static int
1114 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1115 {
1116 	struct bna *bna = rxf->rx->bna;
1117 
1118 	/* Clear pending promisc mode disable */
1119 	if (is_promisc_disable(rxf->rxmode_pending,
1120 				rxf->rxmode_pending_bitmask)) {
1121 		promisc_inactive(rxf->rxmode_pending,
1122 				rxf->rxmode_pending_bitmask);
1123 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1124 		bna->promisc_rid = BFI_INVALID_RID;
1125 		if (cleanup == BNA_HARD_CLEANUP) {
1126 			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1127 			return 1;
1128 		}
1129 	}
1130 
1131 	/* Move promisc mode config from active -> pending */
1132 	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1133 		promisc_enable(rxf->rxmode_pending,
1134 				rxf->rxmode_pending_bitmask);
1135 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1136 		if (cleanup == BNA_HARD_CLEANUP) {
1137 			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1138 			return 1;
1139 		}
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static int
1146 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1147 {
1148 	/* Enable/disable allmulti mode */
1149 	if (is_allmulti_enable(rxf->rxmode_pending,
1150 				rxf->rxmode_pending_bitmask)) {
1151 		/* move allmulti configuration from pending -> active */
1152 		allmulti_inactive(rxf->rxmode_pending,
1153 				rxf->rxmode_pending_bitmask);
1154 		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1155 		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1156 		return 1;
1157 	} else if (is_allmulti_disable(rxf->rxmode_pending,
1158 					rxf->rxmode_pending_bitmask)) {
1159 		/* move allmulti configuration from pending -> active */
1160 		allmulti_inactive(rxf->rxmode_pending,
1161 				rxf->rxmode_pending_bitmask);
1162 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1163 		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1164 		return 1;
1165 	}
1166 
1167 	return 0;
1168 }
1169 
1170 static int
1171 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1172 {
1173 	/* Clear pending allmulti mode disable */
1174 	if (is_allmulti_disable(rxf->rxmode_pending,
1175 				rxf->rxmode_pending_bitmask)) {
1176 		allmulti_inactive(rxf->rxmode_pending,
1177 				rxf->rxmode_pending_bitmask);
1178 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1179 		if (cleanup == BNA_HARD_CLEANUP) {
1180 			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1181 			return 1;
1182 		}
1183 	}
1184 
1185 	/* Move allmulti mode config from active -> pending */
1186 	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1187 		allmulti_enable(rxf->rxmode_pending,
1188 				rxf->rxmode_pending_bitmask);
1189 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1190 		if (cleanup == BNA_HARD_CLEANUP) {
1191 			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1192 			return 1;
1193 		}
1194 	}
1195 
1196 	return 0;
1197 }
1198 
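/* Schedule promiscuous mode enable; returns 1 if a new config change was queued */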
1199 static int
1200 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1201 {
1202 	struct bna *bna = rxf->rx->bna;
1203 	int ret = 0;
1204 
1205 	if (is_promisc_enable(rxf->rxmode_pending,
1206 				rxf->rxmode_pending_bitmask) ||
1207 		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1208 		/* Do nothing if pending enable or already enabled */
1209 	} else if (is_promisc_disable(rxf->rxmode_pending,
1210 					rxf->rxmode_pending_bitmask)) {
1211 		/* Turn off pending disable command */
1212 		promisc_inactive(rxf->rxmode_pending,
1213 			rxf->rxmode_pending_bitmask);
1214 	} else {
1215 		/* Schedule enable */
1216 		promisc_enable(rxf->rxmode_pending,
1217 				rxf->rxmode_pending_bitmask);
1218 		bna->promisc_rid = rxf->rx->rid;
1219 		ret = 1;
1220 	}
1221 
1222 	return ret;
1223 }
1224 
1225 static int
1226 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1227 {
1228 	struct bna *bna = rxf->rx->bna;
1229 	int ret = 0;
1230 
1231 	if (is_promisc_disable(rxf->rxmode_pending,
1232 				rxf->rxmode_pending_bitmask) ||
1233 		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1234 		/* Do nothing if pending disable or already disabled */
1235 	} else if (is_promisc_enable(rxf->rxmode_pending,
1236 					rxf->rxmode_pending_bitmask)) {
1237 		/* Turn off pending enable command */
1238 		promisc_inactive(rxf->rxmode_pending,
1239 				rxf->rxmode_pending_bitmask);
1240 		bna->promisc_rid = BFI_INVALID_RID;
1241 	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1242 		/* Schedule disable */
1243 		promisc_disable(rxf->rxmode_pending,
1244 				rxf->rxmode_pending_bitmask);
1245 		ret = 1;
1246 	}
1247 
1248 	return ret;
1249 }
1250 
1251 static int
1252 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1253 {
1254 	int ret = 0;
1255 
1256 	if (is_allmulti_enable(rxf->rxmode_pending,
1257 			rxf->rxmode_pending_bitmask) ||
1258 			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1259 		/* Do nothing if pending enable or already enabled */
1260 	} else if (is_allmulti_disable(rxf->rxmode_pending,
1261 					rxf->rxmode_pending_bitmask)) {
1262 		/* Turn off pending disable command */
1263 		allmulti_inactive(rxf->rxmode_pending,
1264 			rxf->rxmode_pending_bitmask);
1265 	} else {
1266 		/* Schedule enable */
1267 		allmulti_enable(rxf->rxmode_pending,
1268 				rxf->rxmode_pending_bitmask);
1269 		ret = 1;
1270 	}
1271 
1272 	return ret;
1273 }
1274 
1275 static int
1276 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1277 {
1278 	int ret = 0;
1279 
1280 	if (is_allmulti_disable(rxf->rxmode_pending,
1281 				rxf->rxmode_pending_bitmask) ||
1282 		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1283 		/* Do nothing if pending disable or already disabled */
1284 	} else if (is_allmulti_enable(rxf->rxmode_pending,
1285 					rxf->rxmode_pending_bitmask)) {
1286 		/* Turn off pending enable command */
1287 		allmulti_inactive(rxf->rxmode_pending,
1288 				rxf->rxmode_pending_bitmask);
1289 	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1290 		/* Schedule disable */
1291 		allmulti_disable(rxf->rxmode_pending,
1292 				rxf->rxmode_pending_bitmask);
1293 		ret = 1;
1294 	}
1295 
1296 	return ret;
1297 }
1298 
1299 static int
1300 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1301 {
1302 	if (rxf->vlan_strip_pending) {
1303 		rxf->vlan_strip_pending = false;
1304 		bna_bfi_vlan_strip_enable(rxf);
1305 		return 1;
1306 	}
1307 
1308 	return 0;
1309 }
1310 
1311 /* RX */
1312 
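/* A SINGLE rx path uses one RxQ; SLR and HDS paths use two (large + small/header) */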
1313 #define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
1314 	(qcfg)->num_paths : ((qcfg)->num_paths * 2))
1315 
1316 #define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
1317 	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1318 
1319 #define	call_rx_stop_cbfn(rx)						\
1320 do {								    \
1321 	if ((rx)->stop_cbfn) {						\
1322 		void (*cbfn)(void *, struct bna_rx *);	  \
1323 		void *cbarg;					    \
1324 		cbfn = (rx)->stop_cbfn;				 \
1325 		cbarg = (rx)->stop_cbarg;			       \
1326 		(rx)->stop_cbfn = NULL;					\
1327 		(rx)->stop_cbarg = NULL;				\
1328 		cbfn(cbarg, rx);					\
1329 	}							       \
1330 } while (0)
1331 
1332 #define call_rx_stall_cbfn(rx)						\
1333 do {									\
1334 	if ((rx)->rx_stall_cbfn)					\
1335 		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
1336 } while (0)
1337 
1338 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
1339 do {									\
1340 	struct bna_dma_addr cur_q_addr =				\
1341 		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
1342 	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
1343 	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
1344 	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
1345 	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
1346 	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);	\
1347 	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1348 } while (0)
1349 
1350 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1351 static void bna_rx_enet_stop(struct bna_rx *rx);
1352 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1353 
1354 bfa_fsm_state_decl(bna_rx, stopped,
1355 	struct bna_rx, enum bna_rx_event);
1356 bfa_fsm_state_decl(bna_rx, start_wait,
1357 	struct bna_rx, enum bna_rx_event);
1358 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1359 	struct bna_rx, enum bna_rx_event);
1360 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1361 	struct bna_rx, enum bna_rx_event);
1362 bfa_fsm_state_decl(bna_rx, started,
1363 	struct bna_rx, enum bna_rx_event);
1364 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1365 	struct bna_rx, enum bna_rx_event);
1366 bfa_fsm_state_decl(bna_rx, stop_wait,
1367 	struct bna_rx, enum bna_rx_event);
1368 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1369 	struct bna_rx, enum bna_rx_event);
1370 bfa_fsm_state_decl(bna_rx, failed,
1371 	struct bna_rx, enum bna_rx_event);
1372 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1373 	struct bna_rx, enum bna_rx_event);
1374 
1375 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1376 {
1377 	call_rx_stop_cbfn(rx);
1378 }
1379 
1380 static void bna_rx_sm_stopped(struct bna_rx *rx,
1381 				enum bna_rx_event event)
1382 {
1383 	switch (event) {
1384 	case RX_E_START:
1385 		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1386 		break;
1387 
1388 	case RX_E_STOP:
1389 		call_rx_stop_cbfn(rx);
1390 		break;
1391 
1392 	case RX_E_FAIL:
1393 		/* no-op */
1394 		break;
1395 
1396 	default:
1397 		bfa_sm_fault(event);
1398 		break;
1399 	}
1400 }
1401 
1402 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1403 {
1404 	bna_bfi_rx_enet_start(rx);
1405 }
1406 
1407 void
1408 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1409 {
1410 }
1411 
1412 static void
1413 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1414 {
1415 	switch (event) {
1416 	case RX_E_FAIL:
1417 	case RX_E_STOPPED:
1418 		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1419 		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1420 		break;
1421 
1422 	case RX_E_STARTED:
1423 		bna_rx_enet_stop(rx);
1424 		break;
1425 
1426 	default:
1427 		bfa_sm_fault(event);
1428 		break;
1429 	}
1430 }
1431 
1432 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1433 				enum bna_rx_event event)
1434 {
1435 	switch (event) {
1436 	case RX_E_STOP:
1437 		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1438 		break;
1439 
1440 	case RX_E_FAIL:
1441 		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1442 		break;
1443 
1444 	case RX_E_STARTED:
1445 		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1446 		break;
1447 
1448 	default:
1449 		bfa_sm_fault(event);
1450 		break;
1451 	}
1452 }
1453 
1454 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1455 {
1456 	rx->rx_post_cbfn(rx->bna->bnad, rx);
1457 	bna_rxf_start(&rx->rxf);
1458 }
1459 
1460 void
1461 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1462 {
1463 }
1464 
1465 static void
1466 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1467 {
1468 	switch (event) {
1469 	case RX_E_FAIL:
1470 		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1471 		bna_rxf_fail(&rx->rxf);
1472 		call_rx_stall_cbfn(rx);
1473 		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1474 		break;
1475 
1476 	case RX_E_RXF_STARTED:
1477 		bna_rxf_stop(&rx->rxf);
1478 		break;
1479 
1480 	case RX_E_RXF_STOPPED:
1481 		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1482 		call_rx_stall_cbfn(rx);
1483 		bna_rx_enet_stop(rx);
1484 		break;
1485 
1486 	default:
1487 		bfa_sm_fault(event);
1488 		break;
1489 	}
1491 }
1492 
1493 static void
1494 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1495 {
1496 }
1497 
1498 static void
1499 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1500 {
1501 	switch (event) {
1502 	case RX_E_FAIL:
1503 	case RX_E_STOPPED:
1504 		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1505 		break;
1506 
1507 	case RX_E_STARTED:
1508 		bna_rx_enet_stop(rx);
1509 		break;
1510 
1511 	default:
1512 		bfa_sm_fault(event);
1513 	}
1514 }
1515 
1516 void
1517 bna_rx_sm_started_entry(struct bna_rx *rx)
1518 {
1519 	struct bna_rxp *rxp;
1520 	struct list_head *qe_rxp;
1521 	int is_regular = (rx->type == BNA_RX_T_REGULAR);
1522 
1523 	/* Start IB */
1524 	list_for_each(qe_rxp, &rx->rxp_q) {
1525 		rxp = (struct bna_rxp *)qe_rxp;
1526 		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1527 	}
1528 
1529 	bna_ethport_cb_rx_started(&rx->bna->ethport);
1530 }
1531 
1532 static void
1533 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1534 {
1535 	switch (event) {
1536 	case RX_E_STOP:
1537 		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1538 		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1539 		bna_rxf_stop(&rx->rxf);
1540 		break;
1541 
1542 	case RX_E_FAIL:
1543 		bfa_fsm_set_state(rx, bna_rx_sm_failed);
1544 		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1545 		bna_rxf_fail(&rx->rxf);
1546 		call_rx_stall_cbfn(rx);
1547 		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1548 		break;
1549 
1550 	default:
1551 		bfa_sm_fault(event);
1552 		break;
1553 	}
1554 }
1555 
1556 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1557 				enum bna_rx_event event)
1558 {
1559 	switch (event) {
1560 	case RX_E_STOP:
1561 		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1562 		break;
1563 
1564 	case RX_E_FAIL:
1565 		bfa_fsm_set_state(rx, bna_rx_sm_failed);
1566 		bna_rxf_fail(&rx->rxf);
1567 		call_rx_stall_cbfn(rx);
1568 		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1569 		break;
1570 
1571 	case RX_E_RXF_STARTED:
1572 		bfa_fsm_set_state(rx, bna_rx_sm_started);
1573 		break;
1574 
1575 	default:
1576 		bfa_sm_fault(event);
1577 		break;
1578 	}
1579 }
1580 
1581 void
1582 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1583 {
1584 }
1585 
1586 void
1587 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1588 {
1589 	switch (event) {
1590 	case RX_E_FAIL:
1591 	case RX_E_RXF_STOPPED:
1592 		/* No-op */
1593 		break;
1594 
1595 	case RX_E_CLEANUP_DONE:
1596 		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1597 		break;
1598 
1599 	default:
1600 		bfa_sm_fault(event);
1601 		break;
1602 	}
1603 }
1604 
1605 static void
1606 bna_rx_sm_failed_entry(struct bna_rx *rx)
1607 {
1608 }
1609 
1610 static void
1611 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1612 {
1613 	switch (event) {
1614 	case RX_E_START:
1615 		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1616 		break;
1617 
1618 	case RX_E_STOP:
1619 		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1620 		break;
1621 
1622 	case RX_E_FAIL:
1623 	case RX_E_RXF_STARTED:
1624 	case RX_E_RXF_STOPPED:
1625 		/* No-op */
1626 		break;
1627 
1628 	case RX_E_CLEANUP_DONE:
1629 		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1630 		break;
1631 
1632 	default:
1633 		bfa_sm_fault(event);
1634 		break;
1635 	}
}
1636 
1637 static void
1638 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1639 {
1640 }
1641 
1642 static void
1643 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1644 {
1645 	switch (event) {
1646 	case RX_E_STOP:
1647 		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1648 		break;
1649 
1650 	case RX_E_FAIL:
1651 		bfa_fsm_set_state(rx, bna_rx_sm_failed);
1652 		break;
1653 
1654 	case RX_E_CLEANUP_DONE:
1655 		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1656 		break;
1657 
1658 	default:
1659 		bfa_sm_fault(event);
1660 		break;
1661 	}
1662 }
1663 
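/* Build the RX_CFG_SET request describing each path's RxQs, CQ and IB, and post it to firmware */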
1664 static void
1665 bna_bfi_rx_enet_start(struct bna_rx *rx)
1666 {
1667 	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1668 	struct bna_rxp *rxp = NULL;
1669 	struct bna_rxq *q0 = NULL, *q1 = NULL;
1670 	struct list_head *rxp_qe;
1671 	int i;
1672 
1673 	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1674 		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1675 	cfg_req->mh.num_entries = htons(
1676 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1677 
1678 	cfg_req->num_queue_sets = rx->num_paths;
1679 	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1680 		i < rx->num_paths;
1681 		i++, rxp_qe = bfa_q_next(rxp_qe)) {
1682 		rxp = (struct bna_rxp *)rxp_qe;
1683 
1684 		GET_RXQS(rxp, q0, q1);
1685 		switch (rxp->type) {
1686 		case BNA_RXP_SLR:
1687 		case BNA_RXP_HDS:
1688 			/* Small RxQ */
1689 			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1690 						&q1->qpt);
1691 			cfg_req->q_cfg[i].qs.rx_buffer_size =
1692 				htons((u16)q1->buffer_size);
1693 			/* Fall through */
1694 
1695 		case BNA_RXP_SINGLE:
1696 			/* Large/Single RxQ */
1697 			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1698 						&q0->qpt);
1699 			q0->buffer_size =
1700 				bna_enet_mtu_get(&rx->bna->enet);
1701 			cfg_req->q_cfg[i].ql.rx_buffer_size =
1702 				htons((u16)q0->buffer_size);
1703 			break;
1704 
1705 		default:
1706 			BUG_ON(1);
1707 		}
1708 
1709 		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1710 					&rxp->cq.qpt);
1711 
1712 		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1713 			rxp->cq.ib.ib_seg_host_addr.lsb;
1714 		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1715 			rxp->cq.ib.ib_seg_host_addr.msb;
1716 		cfg_req->q_cfg[i].ib.intr.msix_index =
1717 			htons((u16)rxp->cq.ib.intr_vector);
1718 	}
1719 
1720 	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1721 	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1722 	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1723 	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1724 	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1725 				? BNA_STATUS_T_ENABLED :
1726 				BNA_STATUS_T_DISABLED;
1727 	cfg_req->ib_cfg.coalescing_timeout =
1728 			htonl((u32)rxp->cq.ib.coalescing_timeo);
1729 	cfg_req->ib_cfg.inter_pkt_timeout =
1730 			htonl((u32)rxp->cq.ib.interpkt_timeo);
1731 	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1732 
1733 	switch (rxp->type) {
1734 	case BNA_RXP_SLR:
1735 		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1736 		break;
1737 
1738 	case BNA_RXP_HDS:
1739 		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1740 		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1741 		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1742 		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1743 		break;
1744 
1745 	case BNA_RXP_SINGLE:
1746 		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1747 		break;
1748 
1749 	default:
1750 		BUG_ON(1);
1751 	}
1752 	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1753 
1754 	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1755 		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1756 	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1757 }
1758 
1759 static void
1760 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1761 {
1762 	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1763 
1764 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1765 		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1766 	req->mh.num_entries = htons(
1767 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1768 	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1769 		&req->mh);
1770 	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1771 }
1772 
1773 static void
1774 bna_rx_enet_stop(struct bna_rx *rx)
1775 {
1776 	struct bna_rxp *rxp;
1777 	struct list_head		 *qe_rxp;
1778 
1779 	/* Stop IB */
1780 	list_for_each(qe_rxp, &rx->rxp_q) {
1781 		rxp = (struct bna_rxp *)qe_rxp;
1782 		bna_ib_stop(rx->bna, &rxp->cq.ib);
1783 	}
1784 
1785 	bna_bfi_rx_enet_stop(rx);
1786 }
1787 
1788 static int
1789 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1790 {
1791 	if ((rx_mod->rx_free_count == 0) ||
1792 		(rx_mod->rxp_free_count == 0) ||
1793 		(rx_mod->rxq_free_count == 0))
1794 		return 0;
1795 
1796 	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1797 		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1798 			(rx_mod->rxq_free_count < rx_cfg->num_paths))
1799 				return 0;
1800 	} else {
1801 		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1802 			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1803 			return 0;
1804 	}
1805 
1806 	return 1;
1807 }
1808 
1809 static struct bna_rxq *
1810 bna_rxq_get(struct bna_rx_mod *rx_mod)
1811 {
1812 	struct bna_rxq *rxq = NULL;
1813 	struct list_head	*qe = NULL;
1814 
1815 	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1816 	rx_mod->rxq_free_count--;
1817 	rxq = (struct bna_rxq *)qe;
1818 	bfa_q_qe_init(&rxq->qe);
1819 
1820 	return rxq;
1821 }
1822 
1823 static void
1824 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1825 {
1826 	bfa_q_qe_init(&rxq->qe);
1827 	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1828 	rx_mod->rxq_free_count++;
1829 }
1830 
1831 static struct bna_rxp *
1832 bna_rxp_get(struct bna_rx_mod *rx_mod)
1833 {
1834 	struct list_head	*qe = NULL;
1835 	struct bna_rxp *rxp = NULL;
1836 
1837 	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1838 	rx_mod->rxp_free_count--;
1839 	rxp = (struct bna_rxp *)qe;
1840 	bfa_q_qe_init(&rxp->qe);
1841 
1842 	return rxp;
1843 }
1844 
1845 static void
1846 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1847 {
1848 	bfa_q_qe_init(&rxp->qe);
1849 	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1850 	rx_mod->rxp_free_count++;
1851 }
1852 
1853 static struct bna_rx *
1854 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1855 {
1856 	struct list_head	*qe = NULL;
1857 	struct bna_rx *rx = NULL;
1858 
1859 	if (type == BNA_RX_T_REGULAR) {
1860 		bfa_q_deq(&rx_mod->rx_free_q, &qe);
1861 	} else
1862 		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1863 
1864 	rx_mod->rx_free_count--;
1865 	rx = (struct bna_rx *)qe;
1866 	bfa_q_qe_init(&rx->qe);
1867 	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1868 	rx->type = type;
1869 
1870 	return rx;
1871 }
1872 
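/* Return the rx to the free list, keeping the list sorted by rid */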
1873 static void
1874 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1875 {
1876 	struct list_head *prev_qe = NULL;
1877 	struct list_head *qe;
1878 
1879 	bfa_q_qe_init(&rx->qe);
1880 
1881 	list_for_each(qe, &rx_mod->rx_free_q) {
1882 		if (((struct bna_rx *)qe)->rid < rx->rid)
1883 			prev_qe = qe;
1884 		else
1885 			break;
1886 	}
1887 
1888 	if (prev_qe == NULL) {
1889 		/* This is the first entry */
1890 		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
1891 	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
1892 		/* This is the last entry */
1893 		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
1894 	} else {
1895 		/* Somewhere in the middle */
1896 		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
1897 		bfa_q_prev(&rx->qe) = prev_qe;
1898 		bfa_q_next(prev_qe) = &rx->qe;
1899 		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
1900 	}
1901 
1902 	rx_mod->rx_free_count++;
1903 }
1904 
1905 static void
1906 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1907 		struct bna_rxq *q1)
1908 {
1909 	switch (rxp->type) {
1910 	case BNA_RXP_SINGLE:
1911 		rxp->rxq.single.only = q0;
1912 		rxp->rxq.single.reserved = NULL;
1913 		break;
1914 	case BNA_RXP_SLR:
1915 		rxp->rxq.slr.large = q0;
1916 		rxp->rxq.slr.small = q1;
1917 		break;
1918 	case BNA_RXP_HDS:
1919 		rxp->rxq.hds.data = q0;
1920 		rxp->rxq.hds.hdr = q1;
1921 		break;
1922 	default:
1923 		break;
1924 	}
1925 }
1926 
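/* Set up the RxQ page table: record per-page DMA addresses and kernel virtual addresses */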
1927 static void
1928 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1929 		struct bna_rxp *rxp,
1930 		u32 page_count,
1931 		u32 page_size,
1932 		struct bna_mem_descr *qpt_mem,
1933 		struct bna_mem_descr *swqpt_mem,
1934 		struct bna_mem_descr *page_mem)
1935 {
1936 	u8 *kva;
1937 	u64 dma;
1938 	struct bna_dma_addr bna_dma;
1939 	int	i;
1940 
1941 	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1942 	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1943 	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1944 	rxq->qpt.page_count = page_count;
1945 	rxq->qpt.page_size = page_size;
1946 
1947 	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1948 	rxq->rcb->sw_q = page_mem->kva;
1949 
1950 	kva = page_mem->kva;
1951 	BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1952 
1953 	for (i = 0; i < rxq->qpt.page_count; i++) {
1954 		rxq->rcb->sw_qpt[i] = kva;
1955 		kva += PAGE_SIZE;
1956 
1957 		BNA_SET_DMA_ADDR(dma, &bna_dma);
1958 		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1959 			bna_dma.lsb;
1960 		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1961 			bna_dma.msb;
1962 		dma += PAGE_SIZE;
1963 	}
1964 }
1965 
1966 static void
1967 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1968 		u32 page_count,
1969 		u32 page_size,
1970 		struct bna_mem_descr *qpt_mem,
1971 		struct bna_mem_descr *swqpt_mem,
1972 		struct bna_mem_descr *page_mem)
1973 {
1974 	u8 *kva;
1975 	u64 dma;
1976 	struct bna_dma_addr bna_dma;
1977 	int	i;
1978 
1979 	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1980 	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1981 	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1982 	rxp->cq.qpt.page_count = page_count;
1983 	rxp->cq.qpt.page_size = page_size;
1984 
1985 	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1986 	rxp->cq.ccb->sw_q = page_mem->kva;
1987 
1988 	kva = page_mem->kva;
1989 	BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1990 
1991 	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1992 		rxp->cq.ccb->sw_qpt[i] = kva;
1993 		kva += PAGE_SIZE;
1994 
1995 		BNA_SET_DMA_ADDR(dma, &bna_dma);
1996 		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1997 			bna_dma.lsb;
1998 		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1999 			bna_dma.msb;
2000 		dma += PAGE_SIZE;
2001 	}
2002 }
2003 
2004 static void
2005 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2006 {
2007 	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2008 
2009 	bfa_wc_down(&rx_mod->rx_stop_wc);
2010 }
2011 
2012 static void
2013 bna_rx_mod_cb_rx_stopped_all(void *arg)
2014 {
2015 	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2016 
2017 	if (rx_mod->stop_cbfn)
2018 		rx_mod->stop_cbfn(&rx_mod->bna->enet);
2019 	rx_mod->stop_cbfn = NULL;
2020 }
2021 
2022 static void
2023 bna_rx_start(struct bna_rx *rx)
2024 {
2025 	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2026 	if (rx->rx_flags & BNA_RX_F_ENABLED)
2027 		bfa_fsm_send_event(rx, RX_E_START);
2028 }
2029 
2030 static void
2031 bna_rx_stop(struct bna_rx *rx)
2032 {
2033 	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2034 	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2035 		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2036 	else {
2037 		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2038 		rx->stop_cbarg = &rx->bna->rx_mod;
2039 		bfa_fsm_send_event(rx, RX_E_STOP);
2040 	}
2041 }
2042 
2043 static void
2044 bna_rx_fail(struct bna_rx *rx)
2045 {
2046 	/* Indicate Enet is no longer started, and fail the Rx state machine */
2047 	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2048 	bfa_fsm_send_event(rx, RX_E_FAIL);
2049 }
2050 
2051 void
2052 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2053 {
2054 	struct bna_rx *rx;
2055 	struct list_head *qe;
2056 
2057 	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2058 	if (type == BNA_RX_T_LOOPBACK)
2059 		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2060 
2061 	list_for_each(qe, &rx_mod->rx_active_q) {
2062 		rx = (struct bna_rx *)qe;
2063 		if (rx->type == type)
2064 			bna_rx_start(rx);
2065 	}
2066 }
2067 
2068 void
2069 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2070 {
2071 	struct bna_rx *rx;
2072 	struct list_head *qe;
2073 
2074 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2075 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2076 
2077 	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2078 
2079 	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2080 
2081 	list_for_each(qe, &rx_mod->rx_active_q) {
2082 		rx = (struct bna_rx *)qe;
2083 		if (rx->type == type) {
2084 			bfa_wc_up(&rx_mod->rx_stop_wc);
2085 			bna_rx_stop(rx);
2086 		}
2087 	}
2088 
2089 	bfa_wc_wait(&rx_mod->rx_stop_wc);
2090 }
2091 
2092 void
2093 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2094 {
2095 	struct bna_rx *rx;
2096 	struct list_head *qe;
2097 
2098 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2099 	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2100 
2101 	list_for_each(qe, &rx_mod->rx_active_q) {
2102 		rx = (struct bna_rx *)qe;
2103 		bna_rx_fail(rx);
2104 	}
2105 }
2106 
2107 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2108 			struct bna_res_info *res_info)
2109 {
2110 	int	index;
2111 	struct bna_rx *rx_ptr;
2112 	struct bna_rxp *rxp_ptr;
2113 	struct bna_rxq *rxq_ptr;
2114 
2115 	rx_mod->bna = bna;
2116 	rx_mod->flags = 0;
2117 
2118 	rx_mod->rx = (struct bna_rx *)
2119 		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2120 	rx_mod->rxp = (struct bna_rxp *)
2121 		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2122 	rx_mod->rxq = (struct bna_rxq *)
2123 		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2124 
2125 	/* Initialize the queues */
2126 	INIT_LIST_HEAD(&rx_mod->rx_free_q);
2127 	rx_mod->rx_free_count = 0;
2128 	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2129 	rx_mod->rxq_free_count = 0;
2130 	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2131 	rx_mod->rxp_free_count = 0;
2132 	INIT_LIST_HEAD(&rx_mod->rx_active_q);
2133 
2134 	/* Seed the Rx free list */
2135 	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2136 		rx_ptr = &rx_mod->rx[index];
2137 
2138 		bfa_q_qe_init(&rx_ptr->qe);
2139 		INIT_LIST_HEAD(&rx_ptr->rxp_q);
2140 		rx_ptr->bna = NULL;
2141 		rx_ptr->rid = index;
2142 		rx_ptr->stop_cbfn = NULL;
2143 		rx_ptr->stop_cbarg = NULL;
2144 
2145 		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2146 		rx_mod->rx_free_count++;
2147 	}
2148 
2149 	/* Seed the Rx path (RXP) free list */
2150 	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2151 		rxp_ptr = &rx_mod->rxp[index];
2152 		bfa_q_qe_init(&rxp_ptr->qe);
2153 		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2154 		rx_mod->rxp_free_count++;
2155 	}
2156 
2157 	/* Seed the RxQ free list: two RxQs per Rx path */
2158 	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2159 		rxq_ptr = &rx_mod->rxq[index];
2160 		bfa_q_qe_init(&rxq_ptr->qe);
2161 		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2162 		rx_mod->rxq_free_count++;
2163 	}
2164 }
2165 
2166 void
2167 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2168 {
2169 	struct list_head		*qe;
2170 	int i;
2171 
2172 	i = 0;
2173 	list_for_each(qe, &rx_mod->rx_free_q)
2174 		i++;
2175 
2176 	i = 0;
2177 	list_for_each(qe, &rx_mod->rxp_free_q)
2178 		i++;
2179 
2180 	i = 0;
2181 	list_for_each(qe, &rx_mod->rxq_free_q)
2182 		i++;
2183 
2184 	rx_mod->bna = NULL;
2185 }
2186 
2187 void
2188 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2189 {
2190 	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2191 	struct bna_rxp *rxp = NULL;
2192 	struct bna_rxq *q0 = NULL, *q1 = NULL;
2193 	struct list_head *rxp_qe;
2194 	int i;
2195 
2196 	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2197 		sizeof(struct bfi_enet_rx_cfg_rsp));
2198 
2199 	rx->hw_id = cfg_rsp->hw_id;
2200 
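	/*
	 * Walk the Rx paths in list order; q_handles[i] in the response is
	 * assumed to correspond to the i-th path that was listed in the
	 * start request.
	 */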
2201 	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2202 		i < rx->num_paths;
2203 		i++, rxp_qe = bfa_q_next(rxp_qe)) {
2204 		rxp = (struct bna_rxp *)rxp_qe;
2205 		GET_RXQS(rxp, q0, q1);
2206 
2207 		/* Setup doorbells */
2208 		rxp->cq.ccb->i_dbell->doorbell_addr =
2209 			rx->bna->pcidev.pci_bar_kva
2210 			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
2211 		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2212 		q0->rcb->q_dbell =
2213 			rx->bna->pcidev.pci_bar_kva
2214 			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
2215 		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2216 		if (q1) {
2217 			q1->rcb->q_dbell =
2218 			rx->bna->pcidev.pci_bar_kva
2219 			+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
2220 			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2221 		}
2222 
2223 		/* Initialize producer/consumer indexes */
2224 		(*rxp->cq.ccb->hw_producer_index) = 0;
2225 		rxp->cq.ccb->producer_index = 0;
2226 		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2227 		if (q1)
2228 			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2229 	}
2230 
2231 	bfa_fsm_send_event(rx, RX_E_STARTED);
2232 }
2233 
2234 void
2235 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2236 {
2237 	bfa_fsm_send_event(rx, RX_E_STOPPED);
2238 }
2239 
2240 void
2241 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2242 {
2243 	u32 cq_size, hq_size, dq_size;
2244 	u32 cpage_count, hpage_count, dpage_count;
2245 	struct bna_mem_info *mem_info;
2246 	u32 cq_depth;
2247 	u32 hq_depth;
2248 	u32 dq_depth;
2249 
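	/*
	 * The completion queue needs one entry per buffer posted on the data
	 * queue plus, for SLR/HDS, the header/small queue, hence
	 * cq_depth = dq_depth + hq_depth. Each depth is then rounded up to a
	 * power of two and the ring sizes are page-aligned to derive the
	 * per-queue page counts used for the QPT resources below.
	 */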
2250 	dq_depth = q_cfg->q_depth;
2251 	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2252 	cq_depth = dq_depth + hq_depth;
2253 
2254 	BNA_TO_POWER_OF_2_HIGH(cq_depth);
2255 	cq_size = cq_depth * BFI_CQ_WI_SIZE;
2256 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2257 	cpage_count = SIZE_TO_PAGES(cq_size);
2258 
2259 	BNA_TO_POWER_OF_2_HIGH(dq_depth);
2260 	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2261 	dq_size = ALIGN(dq_size, PAGE_SIZE);
2262 	dpage_count = SIZE_TO_PAGES(dq_size);
2263 
2264 	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2265 		BNA_TO_POWER_OF_2_HIGH(hq_depth);
2266 		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2267 		hq_size = ALIGN(hq_size, PAGE_SIZE);
2268 		hpage_count = SIZE_TO_PAGES(hq_size);
2269 	} else
2270 		hpage_count = 0;
2271 
2272 	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2273 	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2274 	mem_info->mem_type = BNA_MEM_T_KVA;
2275 	mem_info->len = sizeof(struct bna_ccb);
2276 	mem_info->num = q_cfg->num_paths;
2277 
2278 	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2279 	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2280 	mem_info->mem_type = BNA_MEM_T_KVA;
2281 	mem_info->len = sizeof(struct bna_rcb);
2282 	mem_info->num = BNA_GET_RXQS(q_cfg);
2283 
2284 	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2285 	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2286 	mem_info->mem_type = BNA_MEM_T_DMA;
2287 	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2288 	mem_info->num = q_cfg->num_paths;
2289 
2290 	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2291 	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2292 	mem_info->mem_type = BNA_MEM_T_KVA;
2293 	mem_info->len = cpage_count * sizeof(void *);
2294 	mem_info->num = q_cfg->num_paths;
2295 
2296 	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2297 	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2298 	mem_info->mem_type = BNA_MEM_T_DMA;
2299 	mem_info->len = PAGE_SIZE * cpage_count;
2300 	mem_info->num = q_cfg->num_paths;
2301 
2302 	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2303 	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2304 	mem_info->mem_type = BNA_MEM_T_DMA;
2305 	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2306 	mem_info->num = q_cfg->num_paths;
2307 
2308 	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2309 	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2310 	mem_info->mem_type = BNA_MEM_T_KVA;
2311 	mem_info->len = dpage_count * sizeof(void *);
2312 	mem_info->num = q_cfg->num_paths;
2313 
2314 	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2315 	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2316 	mem_info->mem_type = BNA_MEM_T_DMA;
2317 	mem_info->len = PAGE_SIZE * dpage_count;
2318 	mem_info->num = q_cfg->num_paths;
2319 
2320 	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2321 	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2322 	mem_info->mem_type = BNA_MEM_T_DMA;
2323 	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2324 	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2325 
2326 	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2327 	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2328 	mem_info->mem_type = BNA_MEM_T_KVA;
2329 	mem_info->len = hpage_count * sizeof(void *);
2330 	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2331 
2332 	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2333 	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2334 	mem_info->mem_type = BNA_MEM_T_DMA;
2335 	mem_info->len = PAGE_SIZE * hpage_count;
2336 	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2337 
2338 	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2339 	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2340 	mem_info->mem_type = BNA_MEM_T_DMA;
2341 	mem_info->len = BFI_IBIDX_SIZE;
2342 	mem_info->num = q_cfg->num_paths;
2343 
2344 	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2345 	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2346 	mem_info->mem_type = BNA_MEM_T_KVA;
2347 	mem_info->len = BFI_ENET_RSS_RIT_MAX;
2348 	mem_info->num = 1;
2349 
2350 	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2351 	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2352 	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2353 }
2354 
2355 struct bna_rx *
2356 bna_rx_create(struct bna *bna, struct bnad *bnad,
2357 		struct bna_rx_config *rx_cfg,
2358 		const struct bna_rx_event_cbfn *rx_cbfn,
2359 		struct bna_res_info *res_info,
2360 		void *priv)
2361 {
2362 	struct bna_rx_mod *rx_mod = &bna->rx_mod;
2363 	struct bna_rx *rx;
2364 	struct bna_rxp *rxp;
2365 	struct bna_rxq *q0;
2366 	struct bna_rxq *q1;
2367 	struct bna_intr_info *intr_info;
2368 	u32 page_count;
2369 	struct bna_mem_descr *ccb_mem;
2370 	struct bna_mem_descr *rcb_mem;
2371 	struct bna_mem_descr *unmapq_mem;
2372 	struct bna_mem_descr *cqpt_mem;
2373 	struct bna_mem_descr *cswqpt_mem;
2374 	struct bna_mem_descr *cpage_mem;
2375 	struct bna_mem_descr *hqpt_mem;
2376 	struct bna_mem_descr *dqpt_mem;
2377 	struct bna_mem_descr *hsqpt_mem;
2378 	struct bna_mem_descr *dsqpt_mem;
2379 	struct bna_mem_descr *hpage_mem;
2380 	struct bna_mem_descr *dpage_mem;
2381 	int i;
2382 	int dpage_count, hpage_count, rcb_idx;
2383 
2384 	if (!bna_rx_res_check(rx_mod, rx_cfg))
2385 		return NULL;
2386 
2387 	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2388 	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2389 	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2390 	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2391 	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2392 	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2393 	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2394 	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2395 	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2396 	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2397 	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2398 	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2399 	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2400 
2401 	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2402 			PAGE_SIZE;
2403 
2404 	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2405 			PAGE_SIZE;
2406 
2407 	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2408 			PAGE_SIZE;
2409 
2410 	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2411 	rx->bna = bna;
2412 	rx->rx_flags = 0;
2413 	INIT_LIST_HEAD(&rx->rxp_q);
2414 	rx->stop_cbfn = NULL;
2415 	rx->stop_cbarg = NULL;
2416 	rx->priv = priv;
2417 
2418 	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2419 	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2420 	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2421 	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2422 	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2423 	/* Following callbacks are mandatory */
2424 	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2425 	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2426 
2427 	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2428 		switch (rx->type) {
2429 		case BNA_RX_T_REGULAR:
2430 			if (!(rx->bna->rx_mod.flags &
2431 				BNA_RX_MOD_F_ENET_LOOPBACK))
2432 				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2433 			break;
2434 		case BNA_RX_T_LOOPBACK:
2435 			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2436 				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2437 			break;
2438 		}
2439 	}
2440 
2441 	rx->num_paths = rx_cfg->num_paths;
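	/*
	 * In the loop below, i indexes per-path resources (CCB, IB, CQ page
	 * tables) while rcb_idx walks the flat RCB/unmap-queue arrays,
	 * advancing once for the large queue and once more for the small
	 * queue when one exists.
	 */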
2442 	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
2443 		rxp = bna_rxp_get(rx_mod);
2444 		list_add_tail(&rxp->qe, &rx->rxp_q);
2445 		rxp->type = rx_cfg->rxp_type;
2446 		rxp->rx = rx;
2447 		rxp->cq.rx = rx;
2448 
2449 		q0 = bna_rxq_get(rx_mod);
2450 		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2451 			q1 = NULL;
2452 		else
2453 			q1 = bna_rxq_get(rx_mod);
2454 
2455 		if (1 == intr_info->num)
2456 			rxp->vector = intr_info->idl[0].vector;
2457 		else
2458 			rxp->vector = intr_info->idl[i].vector;
2459 
2460 		/* Setup IB */
2461 
2462 		rxp->cq.ib.ib_seg_host_addr.lsb =
2463 		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2464 		rxp->cq.ib.ib_seg_host_addr.msb =
2465 		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2466 		rxp->cq.ib.ib_seg_host_addr_kva =
2467 		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2468 		rxp->cq.ib.intr_type = intr_info->intr_type;
2469 		if (intr_info->intr_type == BNA_INTR_T_MSIX)
2470 			rxp->cq.ib.intr_vector = rxp->vector;
2471 		else
2472 			rxp->cq.ib.intr_vector = (1 << rxp->vector);
2473 		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2474 		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2475 		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2476 
2477 		bna_rxp_add_rxqs(rxp, q0, q1);
2478 
2479 		/* Setup large Q */
2480 
2481 		q0->rx = rx;
2482 		q0->rxp = rxp;
2483 
2484 		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2485 		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2486 		rcb_idx++;
2487 		q0->rcb->q_depth = rx_cfg->q_depth;
2488 		q0->rcb->rxq = q0;
2489 		q0->rcb->bnad = bna->bnad;
2490 		q0->rcb->id = 0;
2491 		q0->rx_packets = q0->rx_bytes = 0;
2492 		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2493 
2494 		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2495 			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2496 
2497 		if (rx->rcb_setup_cbfn)
2498 			rx->rcb_setup_cbfn(bnad, q0->rcb);
2499 
2500 		/* Setup small Q */
2501 
2502 		if (q1) {
2503 			q1->rx = rx;
2504 			q1->rxp = rxp;
2505 
2506 			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2507 			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2508 			rcb_idx++;
2509 			q1->rcb->q_depth = rx_cfg->q_depth;
2510 			q1->rcb->rxq = q1;
2511 			q1->rcb->bnad = bna->bnad;
2512 			q1->rcb->id = 1;
2513 			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2514 					rx_cfg->hds_config.forced_offset
2515 					: rx_cfg->small_buff_size;
2516 			q1->rx_packets = q1->rx_bytes = 0;
2517 			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2518 
2519 			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2520 				&hqpt_mem[i], &hsqpt_mem[i],
2521 				&hpage_mem[i]);
2522 
2523 			if (rx->rcb_setup_cbfn)
2524 				rx->rcb_setup_cbfn(bnad, q1->rcb);
2525 		}
2526 
2527 		/* Setup CQ */
2528 
2529 		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2530 		rxp->cq.ccb->q_depth =	rx_cfg->q_depth +
2531 					((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2532 					0 : rx_cfg->q_depth);
2533 		rxp->cq.ccb->cq = &rxp->cq;
2534 		rxp->cq.ccb->rcb[0] = q0->rcb;
2535 		q0->rcb->ccb = rxp->cq.ccb;
2536 		if (q1) {
2537 			rxp->cq.ccb->rcb[1] = q1->rcb;
2538 			q1->rcb->ccb = rxp->cq.ccb;
2539 		}
2540 		rxp->cq.ccb->hw_producer_index =
2541 			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2542 		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2543 		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2544 		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2545 		rxp->cq.ccb->rx_coalescing_timeo =
2546 			rxp->cq.ib.coalescing_timeo;
2547 		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2548 		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2549 		rxp->cq.ccb->bnad = bna->bnad;
2550 		rxp->cq.ccb->id = i;
2551 
2552 		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2553 			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2554 
2555 		if (rx->ccb_setup_cbfn)
2556 			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2557 	}
2558 
2559 	rx->hds_cfg = rx_cfg->hds_config;
2560 
2561 	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2562 
2563 	bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2564 
2565 	rx_mod->rid_mask |= (1 << rx->rid);
2566 
2567 	return rx;
2568 }
2569 
2570 void
2571 bna_rx_destroy(struct bna_rx *rx)
2572 {
2573 	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2574 	struct bna_rxq *q0 = NULL;
2575 	struct bna_rxq *q1 = NULL;
2576 	struct bna_rxp *rxp;
2577 	struct list_head *qe;
2578 
2579 	bna_rxf_uninit(&rx->rxf);
2580 
2581 	while (!list_empty(&rx->rxp_q)) {
2582 		bfa_q_deq(&rx->rxp_q, &rxp);
2583 		GET_RXQS(rxp, q0, q1);
2584 		if (rx->rcb_destroy_cbfn)
2585 			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2586 		q0->rcb = NULL;
2587 		q0->rxp = NULL;
2588 		q0->rx = NULL;
2589 		bna_rxq_put(rx_mod, q0);
2590 
2591 		if (q1) {
2592 			if (rx->rcb_destroy_cbfn)
2593 				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2594 			q1->rcb = NULL;
2595 			q1->rxp = NULL;
2596 			q1->rx = NULL;
2597 			bna_rxq_put(rx_mod, q1);
2598 		}
2599 		rxp->rxq.slr.large = NULL;
2600 		rxp->rxq.slr.small = NULL;
2601 
2602 		if (rx->ccb_destroy_cbfn)
2603 			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2604 		rxp->cq.ccb = NULL;
2605 		rxp->rx = NULL;
2606 		bna_rxp_put(rx_mod, rxp);
2607 	}
2608 
2609 	list_for_each(qe, &rx_mod->rx_active_q) {
2610 		if (qe == &rx->qe) {
2611 			list_del(&rx->qe);
2612 			bfa_q_qe_init(&rx->qe);
2613 			break;
2614 		}
2615 	}
2616 
2617 	rx_mod->rid_mask &= ~(1 << rx->rid);
2618 
2619 	rx->bna = NULL;
2620 	rx->priv = NULL;
2621 	bna_rx_put(rx_mod, rx);
2622 }
2623 
2624 void
2625 bna_rx_enable(struct bna_rx *rx)
2626 {
2627 	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2628 		return;
2629 
2630 	rx->rx_flags |= BNA_RX_F_ENABLED;
2631 	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2632 		bfa_fsm_send_event(rx, RX_E_START);
2633 }
2634 
2635 void
2636 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2637 		void (*cbfn)(void *, struct bna_rx *))
2638 {
2639 	if (type == BNA_SOFT_CLEANUP) {
2640 		/* h/w should not be accessed. Treat it as if we are already stopped */
2641 		(*cbfn)(rx->bna->bnad, rx);
2642 	} else {
2643 		rx->stop_cbfn = cbfn;
2644 		rx->stop_cbarg = rx->bna->bnad;
2645 
2646 		rx->rx_flags &= ~BNA_RX_F_ENABLED;
2647 
2648 		bfa_fsm_send_event(rx, RX_E_STOP);
2649 	}
2650 }
2651 
2652 void
2653 bna_rx_cleanup_complete(struct bna_rx *rx)
2654 {
2655 	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2656 }
2657 
2658 enum bna_cb_status
2659 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2660 		enum bna_rxmode bitmask,
2661 		void (*cbfn)(struct bnad *, struct bna_rx *))
2662 {
2663 	struct bna_rxf *rxf = &rx->rxf;
2664 	int need_hw_config = 0;
2665 
2666 	/* Error checks */
2667 
2668 	if (is_promisc_enable(new_mode, bitmask)) {
2669 		/* If promisc mode is already enabled elsewhere in the system */
2670 		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2671 			(rx->bna->promisc_rid != rxf->rx->rid))
2672 			goto err_return;
2673 
2674 		/* If default mode is already enabled in the system */
2675 		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2676 			goto err_return;
2677 
2678 		/* Trying to enable promiscuous and default mode together */
2679 		if (is_default_enable(new_mode, bitmask))
2680 			goto err_return;
2681 	}
2682 
2683 	if (is_default_enable(new_mode, bitmask)) {
2684 		/* If default mode is already enabled elsewhere in the system */
2685 		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2686 			(rx->bna->default_mode_rid != rxf->rx->rid)) {
2687 			goto err_return;
2688 		}
2689 
2690 		/* If promiscuous mode is already enabled in the system */
2691 		if (rx->bna->promisc_rid != BFI_INVALID_RID)
2692 			goto err_return;
2693 	}
2694 
2695 	/* Process the commands */
2696 
2697 	if (is_promisc_enable(new_mode, bitmask)) {
2698 		if (bna_rxf_promisc_enable(rxf))
2699 			need_hw_config = 1;
2700 	} else if (is_promisc_disable(new_mode, bitmask)) {
2701 		if (bna_rxf_promisc_disable(rxf))
2702 			need_hw_config = 1;
2703 	}
2704 
2705 	if (is_allmulti_enable(new_mode, bitmask)) {
2706 		if (bna_rxf_allmulti_enable(rxf))
2707 			need_hw_config = 1;
2708 	} else if (is_allmulti_disable(new_mode, bitmask)) {
2709 		if (bna_rxf_allmulti_disable(rxf))
2710 			need_hw_config = 1;
2711 	}
2712 
2713 	/* Trigger h/w if needed */
2714 
2715 	if (need_hw_config) {
2716 		rxf->cam_fltr_cbfn = cbfn;
2717 		rxf->cam_fltr_cbarg = rx->bna->bnad;
2718 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2719 	} else if (cbfn)
2720 		(*cbfn)(rx->bna->bnad, rx);
2721 
2722 	return BNA_CB_SUCCESS;
2723 
2724 err_return:
2725 	return BNA_CB_FAIL;
2726 }
2727 
2728 void
2729 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2730 {
2731 	struct bna_rxf *rxf = &rx->rxf;
2732 
2733 	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2734 		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2735 		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2736 		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2737 	}
2738 }
2739 
2740 void
2741 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2742 {
2743 	struct bna_rxp *rxp;
2744 	struct list_head *qe;
2745 
2746 	list_for_each(qe, &rx->rxp_q) {
2747 		rxp = (struct bna_rxp *)qe;
2748 		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2749 		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2750 	}
2751 }
2752 
2753 void
2754 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2755 {
2756 	int i, j;
2757 
2758 	for (i = 0; i < BNA_LOAD_T_MAX; i++)
2759 		for (j = 0; j < BNA_BIAS_T_MAX; j++)
2760 			bna->rx_mod.dim_vector[i][j] = vector[i][j];
2761 }
2762 
2763 void
2764 bna_rx_dim_update(struct bna_ccb *ccb)
2765 {
2766 	struct bna *bna = ccb->cq->rx->bna;
2767 	u32 load, bias;
2768 	u32 pkt_rt, small_rt, large_rt;
2769 	u8 coalescing_timeo;
2770 
2771 	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2772 		(ccb->pkt_rate.large_pkt_cnt == 0))
2773 		return;
2774 
2775 	/* Arrive at preconfigured coalescing timeo value based on pkt rate */
2776 
2777 	small_rt = ccb->pkt_rate.small_pkt_cnt;
2778 	large_rt = ccb->pkt_rate.large_pkt_cnt;
2779 
2780 	pkt_rt = small_rt + large_rt;
2781 
2782 	if (pkt_rt < BNA_PKT_RATE_10K)
2783 		load = BNA_LOAD_T_LOW_4;
2784 	else if (pkt_rt < BNA_PKT_RATE_20K)
2785 		load = BNA_LOAD_T_LOW_3;
2786 	else if (pkt_rt < BNA_PKT_RATE_30K)
2787 		load = BNA_LOAD_T_LOW_2;
2788 	else if (pkt_rt < BNA_PKT_RATE_40K)
2789 		load = BNA_LOAD_T_LOW_1;
2790 	else if (pkt_rt < BNA_PKT_RATE_50K)
2791 		load = BNA_LOAD_T_HIGH_1;
2792 	else if (pkt_rt < BNA_PKT_RATE_60K)
2793 		load = BNA_LOAD_T_HIGH_2;
2794 	else if (pkt_rt < BNA_PKT_RATE_80K)
2795 		load = BNA_LOAD_T_HIGH_3;
2796 	else
2797 		load = BNA_LOAD_T_HIGH_4;
2798 
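	/*
	 * Bias selection: when small packets dominate (more than twice the
	 * large-packet count) use column 0 of the DIM vector, otherwise
	 * column 1.
	 */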
2799 	if (small_rt > (large_rt << 1))
2800 		bias = 0;
2801 	else
2802 		bias = 1;
2803 
2804 	ccb->pkt_rate.small_pkt_cnt = 0;
2805 	ccb->pkt_rate.large_pkt_cnt = 0;
2806 
2807 	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2808 	ccb->rx_coalescing_timeo = coalescing_timeo;
2809 
2810 	/* Set it to IB */
2811 	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2812 }
2813 
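/*
 * Default DIM vector: rows are indexed by load class, columns by bias;
 * each entry is an interrupt coalescing timeout that bna_rx_dim_update()
 * programs through bna_ib_coalescing_timeo_set(). The driver core is
 * expected to install it with something like:
 *
 *	bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
 */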
2814 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2815 	{12, 12},
2816 	{6, 10},
2817 	{5, 10},
2818 	{4, 8},
2819 	{3, 6},
2820 	{3, 6},
2821 	{2, 4},
2822 	{1, 2},
2823 };
2824 
2825 /* TX */
2826 
2827 #define call_tx_stop_cbfn(tx)						\
2828 do {									\
2829 	if ((tx)->stop_cbfn) {						\
2830 		void (*cbfn)(void *, struct bna_tx *);		\
2831 		void *cbarg;						\
2832 		cbfn = (tx)->stop_cbfn;					\
2833 		cbarg = (tx)->stop_cbarg;				\
2834 		(tx)->stop_cbfn = NULL;					\
2835 		(tx)->stop_cbarg = NULL;				\
2836 		cbfn(cbarg, (tx));					\
2837 	}								\
2838 } while (0)
2839 
2840 #define call_tx_prio_change_cbfn(tx)					\
2841 do {									\
2842 	if ((tx)->prio_change_cbfn) {					\
2843 		void (*cbfn)(struct bnad *, struct bna_tx *);	\
2844 		cbfn = (tx)->prio_change_cbfn;				\
2845 		(tx)->prio_change_cbfn = NULL;				\
2846 		cbfn((tx)->bna->bnad, (tx));				\
2847 	}								\
2848 } while (0)
2849 
2850 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2851 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2852 static void bna_tx_enet_stop(struct bna_tx *tx);
2853 
2854 enum bna_tx_event {
2855 	TX_E_START			= 1,
2856 	TX_E_STOP			= 2,
2857 	TX_E_FAIL			= 3,
2858 	TX_E_STARTED			= 4,
2859 	TX_E_STOPPED			= 5,
2860 	TX_E_PRIO_CHANGE		= 6,
2861 	TX_E_CLEANUP_DONE		= 7,
2862 	TX_E_BW_UPDATE			= 8,
2863 };
2864 
2865 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2866 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2867 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2868 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2869 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2870 			enum bna_tx_event);
2871 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2872 			enum bna_tx_event);
2873 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2874 			enum bna_tx_event);
2875 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2876 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2877 			enum bna_tx_event);
2878 
2879 static void
2880 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2881 {
2882 	call_tx_stop_cbfn(tx);
2883 }
2884 
2885 static void
2886 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2887 {
2888 	switch (event) {
2889 	case TX_E_START:
2890 		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2891 		break;
2892 
2893 	case TX_E_STOP:
2894 		call_tx_stop_cbfn(tx);
2895 		break;
2896 
2897 	case TX_E_FAIL:
2898 		/* No-op */
2899 		break;
2900 
2901 	case TX_E_PRIO_CHANGE:
2902 		call_tx_prio_change_cbfn(tx);
2903 		break;
2904 
2905 	case TX_E_BW_UPDATE:
2906 		/* No-op */
2907 		break;
2908 
2909 	default:
2910 		bfa_sm_fault(event);
2911 	}
2912 }
2913 
2914 static void
2915 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2916 {
2917 	bna_bfi_tx_enet_start(tx);
2918 }
2919 
2920 static void
2921 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2922 {
2923 	switch (event) {
2924 	case TX_E_STOP:
2925 		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2926 		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2927 		break;
2928 
2929 	case TX_E_FAIL:
2930 		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2931 		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2932 		break;
2933 
2934 	case TX_E_STARTED:
2935 		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
2936 			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
2937 				BNA_TX_F_BW_UPDATED);
2938 			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2939 		} else
2940 			bfa_fsm_set_state(tx, bna_tx_sm_started);
2941 		break;
2942 
2943 	case TX_E_PRIO_CHANGE:
2944 		tx->flags |= BNA_TX_F_PRIO_CHANGED;
2945 		break;
2946 
2947 	case TX_E_BW_UPDATE:
2948 		tx->flags |= BNA_TX_F_BW_UPDATED;
2949 		break;
2950 
2951 	default:
2952 		bfa_sm_fault(event);
2953 	}
2954 }
2955 
2956 static void
2957 bna_tx_sm_started_entry(struct bna_tx *tx)
2958 {
2959 	struct bna_txq *txq;
2960 	struct list_head		 *qe;
2961 	int is_regular = (tx->type == BNA_TX_T_REGULAR);
2962 
2963 	list_for_each(qe, &tx->txq_q) {
2964 		txq = (struct bna_txq *)qe;
2965 		txq->tcb->priority = txq->priority;
2966 		/* Start IB */
2967 		bna_ib_start(tx->bna, &txq->ib, is_regular);
2968 	}
2969 	tx->tx_resume_cbfn(tx->bna->bnad, tx);
2970 }
2971 
2972 static void
2973 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2974 {
2975 	switch (event) {
2976 	case TX_E_STOP:
2977 		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2978 		tx->tx_stall_cbfn(tx->bna->bnad, tx);
2979 		bna_tx_enet_stop(tx);
2980 		break;
2981 
2982 	case TX_E_FAIL:
2983 		bfa_fsm_set_state(tx, bna_tx_sm_failed);
2984 		tx->tx_stall_cbfn(tx->bna->bnad, tx);
2985 		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2986 		break;
2987 
2988 	case TX_E_PRIO_CHANGE:
2989 	case TX_E_BW_UPDATE:
2990 		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2991 		break;
2992 
2993 	default:
2994 		bfa_sm_fault(event);
2995 	}
2996 }
2997 
2998 static void
2999 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3000 {
3001 }
3002 
3003 static void
3004 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3005 {
3006 	switch (event) {
3007 	case TX_E_FAIL:
3008 	case TX_E_STOPPED:
3009 		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3010 		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3011 		break;
3012 
3013 	case TX_E_STARTED:
3014 		/**
3015 		 * We are here due to start_wait -> stop_wait transition on
3016 		 * TX_E_STOP event
3017 		 */
3018 		bna_tx_enet_stop(tx);
3019 		break;
3020 
3021 	case TX_E_PRIO_CHANGE:
3022 	case TX_E_BW_UPDATE:
3023 		/* No-op */
3024 		break;
3025 
3026 	default:
3027 		bfa_sm_fault(event);
3028 	}
3029 }
3030 
3031 static void
3032 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3033 {
3034 }
3035 
3036 static void
3037 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3038 {
3039 	switch (event) {
3040 	case TX_E_FAIL:
3041 	case TX_E_PRIO_CHANGE:
3042 	case TX_E_BW_UPDATE:
3043 		/* No-op */
3044 		break;
3045 
3046 	case TX_E_CLEANUP_DONE:
3047 		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3048 		break;
3049 
3050 	default:
3051 		bfa_sm_fault(event);
3052 	}
3053 }
3054 
3055 static void
3056 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3057 {
3058 	tx->tx_stall_cbfn(tx->bna->bnad, tx);
3059 	bna_tx_enet_stop(tx);
3060 }
3061 
3062 static void
3063 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3064 {
3065 	switch (event) {
3066 	case TX_E_STOP:
3067 		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3068 		break;
3069 
3070 	case TX_E_FAIL:
3071 		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3072 		call_tx_prio_change_cbfn(tx);
3073 		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3074 		break;
3075 
3076 	case TX_E_STOPPED:
3077 		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3078 		break;
3079 
3080 	case TX_E_PRIO_CHANGE:
3081 	case TX_E_BW_UPDATE:
3082 		/* No-op */
3083 		break;
3084 
3085 	default:
3086 		bfa_sm_fault(event);
3087 	}
3088 }
3089 
3090 static void
3091 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3092 {
3093 	call_tx_prio_change_cbfn(tx);
3094 	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3095 }
3096 
3097 static void
3098 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3099 {
3100 	switch (event) {
3101 	case TX_E_STOP:
3102 		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3103 		break;
3104 
3105 	case TX_E_FAIL:
3106 		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3107 		break;
3108 
3109 	case TX_E_PRIO_CHANGE:
3110 	case TX_E_BW_UPDATE:
3111 		/* No-op */
3112 		break;
3113 
3114 	case TX_E_CLEANUP_DONE:
3115 		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3116 		break;
3117 
3118 	default:
3119 		bfa_sm_fault(event);
3120 	}
3121 }
3122 
3123 static void
3124 bna_tx_sm_failed_entry(struct bna_tx *tx)
3125 {
3126 }
3127 
3128 static void
3129 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3130 {
3131 	switch (event) {
3132 	case TX_E_START:
3133 		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3134 		break;
3135 
3136 	case TX_E_STOP:
3137 		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3138 		break;
3139 
3140 	case TX_E_FAIL:
3141 		/* No-op */
3142 		break;
3143 
3144 	case TX_E_CLEANUP_DONE:
3145 		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3146 		break;
3147 
3148 	default:
3149 		bfa_sm_fault(event);
3150 	}
3151 }
3152 
3153 static void
3154 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3155 {
3156 }
3157 
3158 static void
3159 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3160 {
3161 	switch (event) {
3162 	case TX_E_STOP:
3163 		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3164 		break;
3165 
3166 	case TX_E_FAIL:
3167 		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3168 		break;
3169 
3170 	case TX_E_CLEANUP_DONE:
3171 		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3172 		break;
3173 
3174 	case TX_E_BW_UPDATE:
3175 		/* No-op */
3176 		break;
3177 
3178 	default:
3179 		bfa_sm_fault(event);
3180 	}
3181 }
3182 
3183 static void
3184 bna_bfi_tx_enet_start(struct bna_tx *tx)
3185 {
3186 	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3187 	struct bna_txq *txq = NULL;
3188 	struct list_head *qe;
3189 	int i;
3190 
3191 	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3192 		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3193 	cfg_req->mh.num_entries = htons(
3194 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3195 
3196 	cfg_req->num_queues = tx->num_txq;
3197 	for (i = 0, qe = bfa_q_first(&tx->txq_q);
3198 		i < tx->num_txq;
3199 		i++, qe = bfa_q_next(qe)) {
3200 		txq = (struct bna_txq *)qe;
3201 
3202 		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3203 		cfg_req->q_cfg[i].q.priority = txq->priority;
3204 
3205 		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3206 			txq->ib.ib_seg_host_addr.lsb;
3207 		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3208 			txq->ib.ib_seg_host_addr.msb;
3209 		cfg_req->q_cfg[i].ib.intr.msix_index =
3210 			htons((u16)txq->ib.intr_vector);
3211 	}
3212 
3213 	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
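	/*
	 * All TxQs of a Tx get identical IB parameters in bna_tx_create(),
	 * so the IB configuration below can be taken from the last txq
	 * visited in the loop above.
	 */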
3214 	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3215 	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3216 	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3217 	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3218 				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3219 	cfg_req->ib_cfg.coalescing_timeout =
3220 			htonl((u32)txq->ib.coalescing_timeo);
3221 	cfg_req->ib_cfg.inter_pkt_timeout =
3222 			htonl((u32)txq->ib.interpkt_timeo);
3223 	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3224 
3225 	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3226 	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3227 	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
3228 	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3229 
3230 	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3231 		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3232 	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3233 }
3234 
3235 static void
3236 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3237 {
3238 	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3239 
3240 	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3241 		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3242 	req->mh.num_entries = htons(
3243 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3244 	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3245 		&req->mh);
3246 	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3247 }
3248 
3249 static void
3250 bna_tx_enet_stop(struct bna_tx *tx)
3251 {
3252 	struct bna_txq *txq;
3253 	struct list_head		 *qe;
3254 
3255 	/* Stop IB */
3256 	list_for_each(qe, &tx->txq_q) {
3257 		txq = (struct bna_txq *)qe;
3258 		bna_ib_stop(tx->bna, &txq->ib);
3259 	}
3260 
3261 	bna_bfi_tx_enet_stop(tx);
3262 }
3263 
3264 static void
3265 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3266 		struct bna_mem_descr *qpt_mem,
3267 		struct bna_mem_descr *swqpt_mem,
3268 		struct bna_mem_descr *page_mem)
3269 {
3270 	u8 *kva;
3271 	u64 dma;
3272 	struct bna_dma_addr bna_dma;
3273 	int i;
3274 
3275 	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3276 	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3277 	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3278 	txq->qpt.page_count = page_count;
3279 	txq->qpt.page_size = page_size;
3280 
3281 	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3282 	txq->tcb->sw_q = page_mem->kva;
3283 
3284 	kva = page_mem->kva;
3285 	BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3286 
3287 	for (i = 0; i < page_count; i++) {
3288 		txq->tcb->sw_qpt[i] = kva;
3289 		kva += PAGE_SIZE;
3290 
3291 		BNA_SET_DMA_ADDR(dma, &bna_dma);
3292 		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3293 			bna_dma.lsb;
3294 		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3295 			bna_dma.msb;
3296 		dma += PAGE_SIZE;
3297 	}
3298 }
3299 
3300 static struct bna_tx *
3301 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3302 {
3303 	struct list_head	*qe = NULL;
3304 	struct bna_tx *tx = NULL;
3305 
3306 	if (list_empty(&tx_mod->tx_free_q))
3307 		return NULL;
3308 	if (type == BNA_TX_T_REGULAR) {
3309 		bfa_q_deq(&tx_mod->tx_free_q, &qe);
3310 	} else {
3311 		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3312 	}
3313 	tx = (struct bna_tx *)qe;
3314 	bfa_q_qe_init(&tx->qe);
3315 	tx->type = type;
3316 
3317 	return tx;
3318 }
3319 
3320 static void
3321 bna_tx_free(struct bna_tx *tx)
3322 {
3323 	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3324 	struct bna_txq *txq;
3325 	struct list_head *prev_qe;
3326 	struct list_head *qe;
3327 
3328 	while (!list_empty(&tx->txq_q)) {
3329 		bfa_q_deq(&tx->txq_q, &txq);
3330 		bfa_q_qe_init(&txq->qe);
3331 		txq->tcb = NULL;
3332 		txq->tx = NULL;
3333 		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3334 	}
3335 
3336 	list_for_each(qe, &tx_mod->tx_active_q) {
3337 		if (qe == &tx->qe) {
3338 			list_del(&tx->qe);
3339 			bfa_q_qe_init(&tx->qe);
3340 			break;
3341 		}
3342 	}
3343 
3344 	tx->bna = NULL;
3345 	tx->priv = NULL;
3346 
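	/*
	 * Put the Tx back on the free list in ascending rid order, mirroring
	 * what bna_rx_put() does for Rx objects.
	 */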
3347 	prev_qe = NULL;
3348 	list_for_each(qe, &tx_mod->tx_free_q) {
3349 		if (((struct bna_tx *)qe)->rid < tx->rid)
3350 			prev_qe = qe;
3351 		else {
3352 			break;
3353 		}
3354 	}
3355 
3356 	if (prev_qe == NULL) {
3357 		/* This is the first entry */
3358 		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3359 	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3360 		/* This is the last entry */
3361 		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3362 	} else {
3363 		/* Somewhere in the middle */
3364 		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3365 		bfa_q_prev(&tx->qe) = prev_qe;
3366 		bfa_q_next(prev_qe) = &tx->qe;
3367 		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3368 	}
3369 }
3370 
3371 static void
3372 bna_tx_start(struct bna_tx *tx)
3373 {
3374 	tx->flags |= BNA_TX_F_ENET_STARTED;
3375 	if (tx->flags & BNA_TX_F_ENABLED)
3376 		bfa_fsm_send_event(tx, TX_E_START);
3377 }
3378 
3379 static void
3380 bna_tx_stop(struct bna_tx *tx)
3381 {
3382 	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3383 	tx->stop_cbarg = &tx->bna->tx_mod;
3384 
3385 	tx->flags &= ~BNA_TX_F_ENET_STARTED;
3386 	bfa_fsm_send_event(tx, TX_E_STOP);
3387 }
3388 
3389 static void
3390 bna_tx_fail(struct bna_tx *tx)
3391 {
3392 	tx->flags &= ~BNA_TX_F_ENET_STARTED;
3393 	bfa_fsm_send_event(tx, TX_E_FAIL);
3394 }
3395 
3396 void
3397 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3398 {
3399 	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3400 	struct bna_txq *txq = NULL;
3401 	struct list_head *qe;
3402 	int i;
3403 
3404 	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3405 		sizeof(struct bfi_enet_tx_cfg_rsp));
3406 
3407 	tx->hw_id = cfg_rsp->hw_id;
3408 
3409 	for (i = 0, qe = bfa_q_first(&tx->txq_q);
3410 		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3411 		txq = (struct bna_txq *)qe;
3412 
3413 		/* Setup doorbells */
3414 		txq->tcb->i_dbell->doorbell_addr =
3415 			tx->bna->pcidev.pci_bar_kva
3416 			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
3417 		txq->tcb->q_dbell =
3418 			tx->bna->pcidev.pci_bar_kva
3419 			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
3420 		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3421 
3422 		/* Initialize producer/consumer indexes */
3423 		(*txq->tcb->hw_consumer_index) = 0;
3424 		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3425 	}
3426 
3427 	bfa_fsm_send_event(tx, TX_E_STARTED);
3428 }
3429 
3430 void
3431 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3432 {
3433 	bfa_fsm_send_event(tx, TX_E_STOPPED);
3434 }
3435 
3436 void
3437 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3438 {
3439 	struct bna_tx *tx;
3440 	struct list_head		*qe;
3441 
3442 	list_for_each(qe, &tx_mod->tx_active_q) {
3443 		tx = (struct bna_tx *)qe;
3444 		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3445 	}
3446 }
3447 
3448 void
3449 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3450 {
3451 	u32 q_size;
3452 	u32 page_count;
3453 	struct bna_mem_info *mem_info;
3454 
3455 	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3456 	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3457 	mem_info->mem_type = BNA_MEM_T_KVA;
3458 	mem_info->len = sizeof(struct bna_tcb);
3459 	mem_info->num = num_txq;
3460 
3461 	q_size = txq_depth * BFI_TXQ_WI_SIZE;
3462 	q_size = ALIGN(q_size, PAGE_SIZE);
3463 	page_count = q_size >> PAGE_SHIFT;
3464 
3465 	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3466 	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3467 	mem_info->mem_type = BNA_MEM_T_DMA;
3468 	mem_info->len = page_count * sizeof(struct bna_dma_addr);
3469 	mem_info->num = num_txq;
3470 
3471 	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3472 	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3473 	mem_info->mem_type = BNA_MEM_T_KVA;
3474 	mem_info->len = page_count * sizeof(void *);
3475 	mem_info->num = num_txq;
3476 
3477 	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3478 	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3479 	mem_info->mem_type = BNA_MEM_T_DMA;
3480 	mem_info->len = PAGE_SIZE * page_count;
3481 	mem_info->num = num_txq;
3482 
3483 	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3484 	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3485 	mem_info->mem_type = BNA_MEM_T_DMA;
3486 	mem_info->len = BFI_IBIDX_SIZE;
3487 	mem_info->num = num_txq;
3488 
3489 	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3490 	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3491 			BNA_INTR_T_MSIX;
3492 	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3493 }
3494 
3495 struct bna_tx *
3496 bna_tx_create(struct bna *bna, struct bnad *bnad,
3497 		struct bna_tx_config *tx_cfg,
3498 		const struct bna_tx_event_cbfn *tx_cbfn,
3499 		struct bna_res_info *res_info, void *priv)
3500 {
3501 	struct bna_intr_info *intr_info;
3502 	struct bna_tx_mod *tx_mod = &bna->tx_mod;
3503 	struct bna_tx *tx;
3504 	struct bna_txq *txq;
3505 	struct list_head *qe;
3506 	int page_count;
3507 	int i;
3508 
3509 	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3510 	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3511 					PAGE_SIZE;
3512 
3513 	/**
3514 	 * Get resources
3515 	 */
3516 
3517 	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3518 		return NULL;
3519 
3520 	/* Tx */
3521 
3522 	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3523 	if (!tx)
3524 		return NULL;
3525 	tx->bna = bna;
3526 	tx->priv = priv;
3527 
3528 	/* TxQs */
3529 
3530 	INIT_LIST_HEAD(&tx->txq_q);
3531 	for (i = 0; i < tx_cfg->num_txq; i++) {
3532 		if (list_empty(&tx_mod->txq_free_q))
3533 			goto err_return;
3534 
3535 		bfa_q_deq(&tx_mod->txq_free_q, &txq);
3536 		bfa_q_qe_init(&txq->qe);
3537 		list_add_tail(&txq->qe, &tx->txq_q);
3538 		txq->tx = tx;
3539 	}
3540 
3541 	/*
3542 	 * Initialize
3543 	 */
3544 
3545 	/* Tx */
3546 
3547 	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3548 	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3549 	/* Following callbacks are mandatory */
3550 	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3551 	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3552 	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3553 
3554 	list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3555 
3556 	tx->num_txq = tx_cfg->num_txq;
3557 
3558 	tx->flags = 0;
3559 	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3560 		switch (tx->type) {
3561 		case BNA_TX_T_REGULAR:
3562 			if (!(tx->bna->tx_mod.flags &
3563 				BNA_TX_MOD_F_ENET_LOOPBACK))
3564 				tx->flags |= BNA_TX_F_ENET_STARTED;
3565 			break;
3566 		case BNA_TX_T_LOOPBACK:
3567 			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3568 				tx->flags |= BNA_TX_F_ENET_STARTED;
3569 			break;
3570 		}
3571 	}
3572 
3573 	/* TxQ */
3574 
3575 	i = 0;
3576 	list_for_each(qe, &tx->txq_q) {
3577 		txq = (struct bna_txq *)qe;
3578 		txq->tcb = (struct bna_tcb *)
3579 		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3580 		txq->tx_packets = 0;
3581 		txq->tx_bytes = 0;
3582 
3583 		/* IB */
3584 		txq->ib.ib_seg_host_addr.lsb =
3585 		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3586 		txq->ib.ib_seg_host_addr.msb =
3587 		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3588 		txq->ib.ib_seg_host_addr_kva =
3589 		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3590 		txq->ib.intr_type = intr_info->intr_type;
3591 		txq->ib.intr_vector = (intr_info->num == 1) ?
3592 					intr_info->idl[0].vector :
3593 					intr_info->idl[i].vector;
3594 		if (intr_info->intr_type == BNA_INTR_T_INTX)
3595 			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3596 		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3597 		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3598 		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3599 
3600 		/* TCB */
3601 
3602 		txq->tcb->q_depth = tx_cfg->txq_depth;
3603 		txq->tcb->unmap_q = (void *)
3604 		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3605 		txq->tcb->hw_consumer_index =
3606 			(u32 *)txq->ib.ib_seg_host_addr_kva;
3607 		txq->tcb->i_dbell = &txq->ib.door_bell;
3608 		txq->tcb->intr_type = txq->ib.intr_type;
3609 		txq->tcb->intr_vector = txq->ib.intr_vector;
3610 		txq->tcb->txq = txq;
3611 		txq->tcb->bnad = bnad;
3612 		txq->tcb->id = i;
3613 
3614 		/* QPT, SWQPT, Pages */
3615 		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3616 			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3617 			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3618 			&res_info[BNA_TX_RES_MEM_T_PAGE].
3619 				  res_u.mem_info.mdl[i]);
3620 
3621 		/* Callback to bnad for setting up TCB */
3622 		if (tx->tcb_setup_cbfn)
3623 			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3624 
3625 		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3626 			txq->priority = txq->tcb->id;
3627 		else
3628 			txq->priority = tx_mod->default_prio;
3629 
3630 		i++;
3631 	}
3632 
3633 	tx->txf_vlan_id = 0;
3634 
3635 	bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3636 
3637 	tx_mod->rid_mask |= (1 << tx->rid);
3638 
3639 	return tx;
3640 
3641 err_return:
3642 	bna_tx_free(tx);
3643 	return NULL;
3644 }
3645 
3646 void
3647 bna_tx_destroy(struct bna_tx *tx)
3648 {
3649 	struct bna_txq *txq;
3650 	struct list_head *qe;
3651 
3652 	list_for_each(qe, &tx->txq_q) {
3653 		txq = (struct bna_txq *)qe;
3654 		if (tx->tcb_destroy_cbfn)
3655 			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3656 	}
3657 
3658 	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3659 	bna_tx_free(tx);
3660 }
3661 
3662 void
3663 bna_tx_enable(struct bna_tx *tx)
3664 {
3665 	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3666 		return;
3667 
3668 	tx->flags |= BNA_TX_F_ENABLED;
3669 
3670 	if (tx->flags & BNA_TX_F_ENET_STARTED)
3671 		bfa_fsm_send_event(tx, TX_E_START);
3672 }
3673 
3674 void
3675 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3676 		void (*cbfn)(void *, struct bna_tx *))
3677 {
3678 	if (type == BNA_SOFT_CLEANUP) {
3679 		(*cbfn)(tx->bna->bnad, tx);
3680 		return;
3681 	}
3682 
3683 	tx->stop_cbfn = cbfn;
3684 	tx->stop_cbarg = tx->bna->bnad;
3685 
3686 	tx->flags &= ~BNA_TX_F_ENABLED;
3687 
3688 	bfa_fsm_send_event(tx, TX_E_STOP);
3689 }
3690 
3691 void
3692 bna_tx_cleanup_complete(struct bna_tx *tx)
3693 {
3694 	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3695 }
3696 
3697 static void
3698 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3699 {
3700 	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3701 
3702 	bfa_wc_down(&tx_mod->tx_stop_wc);
3703 }
3704 
3705 static void
3706 bna_tx_mod_cb_tx_stopped_all(void *arg)
3707 {
3708 	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3709 
3710 	if (tx_mod->stop_cbfn)
3711 		tx_mod->stop_cbfn(&tx_mod->bna->enet);
3712 	tx_mod->stop_cbfn = NULL;
3713 }
3714 
3715 void
3716 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3717 		struct bna_res_info *res_info)
3718 {
3719 	int i;
3720 
3721 	tx_mod->bna = bna;
3722 	tx_mod->flags = 0;
3723 
3724 	tx_mod->tx = (struct bna_tx *)
3725 		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3726 	tx_mod->txq = (struct bna_txq *)
3727 		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3728 
3729 	INIT_LIST_HEAD(&tx_mod->tx_free_q);
3730 	INIT_LIST_HEAD(&tx_mod->tx_active_q);
3731 
3732 	INIT_LIST_HEAD(&tx_mod->txq_free_q);
3733 
3734 	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3735 		tx_mod->tx[i].rid = i;
3736 		bfa_q_qe_init(&tx_mod->tx[i].qe);
3737 		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3738 		bfa_q_qe_init(&tx_mod->txq[i].qe);
3739 		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3740 	}
3741 
3742 	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3743 	tx_mod->default_prio = 0;
3744 	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3745 	tx_mod->iscsi_prio = -1;
3746 }
3747 
3748 void
3749 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3750 {
3751 	struct list_head		*qe;
3752 	int i;
3753 
3754 	i = 0;
3755 	list_for_each(qe, &tx_mod->tx_free_q)
3756 		i++;
3757 
3758 	i = 0;
3759 	list_for_each(qe, &tx_mod->txq_free_q)
3760 		i++;
3761 
3762 	tx_mod->bna = NULL;
3763 }
3764 
3765 void
3766 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3767 {
3768 	struct bna_tx *tx;
3769 	struct list_head		*qe;
3770 
3771 	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3772 	if (type == BNA_TX_T_LOOPBACK)
3773 		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3774 
3775 	list_for_each(qe, &tx_mod->tx_active_q) {
3776 		tx = (struct bna_tx *)qe;
3777 		if (tx->type == type)
3778 			bna_tx_start(tx);
3779 	}
3780 }
3781 
3782 void
3783 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3784 {
3785 	struct bna_tx *tx;
3786 	struct list_head		*qe;
3787 
3788 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3789 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3790 
3791 	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3792 
3793 	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3794 
3795 	list_for_each(qe, &tx_mod->tx_active_q) {
3796 		tx = (struct bna_tx *)qe;
3797 		if (tx->type == type) {
3798 			bfa_wc_up(&tx_mod->tx_stop_wc);
3799 			bna_tx_stop(tx);
3800 		}
3801 	}
3802 
3803 	bfa_wc_wait(&tx_mod->tx_stop_wc);
3804 }
3805 
3806 void
3807 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3808 {
3809 	struct bna_tx *tx;
3810 	struct list_head		*qe;
3811 
3812 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3813 	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3814 
3815 	list_for_each(qe, &tx_mod->tx_active_q) {
3816 		tx = (struct bna_tx *)qe;
3817 		bna_tx_fail(tx);
3818 	}
3819 }
3820 
3821 void
3822 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3823 {
3824 	struct bna_txq *txq;
3825 	struct list_head *qe;
3826 
3827 	list_for_each(qe, &tx->txq_q) {
3828 		txq = (struct bna_txq *)qe;
3829 		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3830 	}
3831 }
3832