/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1,  (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * Copyright 2025 Oxide Computer Company
 */


#include "qede.h"

ddi_device_acc_attr_t qede_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,     // devacc_attr_version;
	DDI_STRUCTURE_LE_ACC,   // devacc_attr_endian_flags;
	DDI_STRICTORDER_ACC,    // devacc_attr_dataorder;
	DDI_FLAGERR_ACC         // devacc_attr_access;
};

ddi_device_acc_attr_t qede_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,    // devacc_attr_version;
	DDI_STRUCTURE_LE_ACC,  // devacc_attr_endian_flags;
	DDI_STRICTORDER_ACC    // devacc_attr_dataorder;
};

/*
 * DMA access attributes for BUFFERS.
 */
ddi_device_acc_attr_t qede_buf_acc_attr =
{
	DDI_DEVICE_ATTR_V0,   // devacc_attr_version;
	DDI_NEVERSWAP_ACC,    // devacc_attr_endian_flags;
	DDI_STRICTORDER_ACC   // devacc_attr_dataorder;
};

ddi_dma_attr_t qede_desc_dma_attr =
{
	DMA_ATTR_V0,
	0x0000000000000000ull,
	0xFFFFFFFFFFFFFFFFull,
	0x00000000FFFFFFFFull,
	QEDE_PAGE_ALIGNMENT,
	0x00000FFF,
	0x00000001,
	0x00000000FFFFFFFFull,
	0xFFFFFFFFFFFFFFFFull,
	1,
	0x00000001,
	DDI_DMA_FLAGERR
};

ddi_dma_attr_t qede_gen_buf_dma_attr =
{
	DMA_ATTR_V0,
	0x0000000000000000ull,
	0xFFFFFFFFFFFFFFFFull,
	0x00000000FFFFFFFFull,
	QEDE_PAGE_ALIGNMENT,
	0x00000FFF,
	0x00000001,
	0x00000000FFFFFFFFull,
	0xFFFFFFFFFFFFFFFFull,
	1,
	0x00000001,
	DDI_DMA_FLAGERR
};

/*
 * DMA attributes for transmit.
 */
ddi_dma_attr_t qede_tx_buf_dma_attr =
{
	DMA_ATTR_V0,
	0x0000000000000000ull,
	0xFFFFFFFFFFFFFFFFull,
	0x00000000FFFFFFFFull,
	1,
	0x00000FFF,
	0x00000001,
	0x00000000FFFFFFFFull,
	0xFFFFFFFFFFFFFFFFull,
	ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1,
	0x00000001,
	DDI_DMA_FLAGERR
};

ddi_dma_attr_t qede_dma_attr_desc =
{
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffffull,	/* dma_attr_addr_hi */
	0x000fffffull,		/* dma_attr_count_max */
	4096,			/* dma_attr_align */
	0x000fffffull,		/* dma_attr_burstsizes */
	4,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};

static ddi_dma_attr_t qede_dma_attr_txbuf =
{
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffffull,	/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
	QEDE_PAGE_ALIGNMENT,	/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFull,	/* dma_attr_seg (maximum segment size) */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

ddi_dma_attr_t qede_dma_attr_rxbuf =
{
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffffull,	/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
	QEDE_PAGE_ALIGNMENT,	/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFull,	/* dma_attr_seg (maximum segment size) */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
};

/* LINTED E_STATIC_UNUSED */
static ddi_dma_attr_t qede_dma_attr_cmddesc =
{
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffffull,	/* dma_attr_addr_hi */
	0xffffffffull,		/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffff,		/* dma_attr_maxxfer */
	0xffffffff,		/* dma_attr_seg */
	ETH_TX_MAX_BDS_PER_NON_LSO_PACKET,	/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * Generic dma attribute for single sg
 */
/* LINTED E_STATIC_UNUSED */
static ddi_dma_attr_t qede_gen_dma_attr_desc =
{
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffffull,	/* dma_attr_addr_hi */
	0x000fffffull,		/* dma_attr_count_max */
	4096,			/* dma_attr_align */
	0x000fffffull,		/* dma_attr_burstsizes */
	4,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};

ddi_dma_attr_t qede_buf2k_dma_attr_txbuf =
{
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffffull,	/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
	BUF_2K_ALIGNMENT,	/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFull,	/* dma_attr_seg (maximum segment size) */
	1,			/* dma_attr_sgllen */
	0x00000001,		/* dma_attr_granular */
	0			/* dma_attr_flags */
};
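
/*
 * A rough usage sketch (illustrative only, not a call made verbatim at
 * this point in the file): each of the templates above is handed to the
 * DDI unchanged, e.g.
 *
 *	ddi_dma_handle_t hdl;
 *	int rc = ddi_dma_alloc_handle(qede->dip, &qede_dma_attr_rxbuf,
 *	    DDI_DMA_DONTWAIT, NULL, &hdl);
 *
 * qede_dma_mem_alloc() further down wraps the full sequence (handle,
 * memory, bind) for callers that need a single-cookie mapping.
 */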

char *
qede_get_ddi_fail(int status)
{
	switch (status) {
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_EALREADY:
		return ("DDI_EALREADY");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("ERROR CODE NOT FOUND!");
	}
}

char *
qede_get_ecore_fail(int status)
{
	switch (status) {
	case ECORE_UNKNOWN_ERROR:
		return ("ECORE_UNKNOWN_ERROR");
	case ECORE_NORESOURCES:
		return ("ECORE_NORESOURCES");
	case ECORE_NODEV:
		return ("ECORE_NODEV");
	case ECORE_ABORTED:
		return ("ECORE_ABORTED");
	case ECORE_AGAIN:
		return ("ECORE_AGAIN");
	case ECORE_NOTIMPL:
		return ("ECORE_NOTIMPL");
	case ECORE_EXISTS:
		return ("ECORE_EXISTS");
	case ECORE_IO:
		return ("ECORE_IO");
	case ECORE_TIMEOUT:
		return ("ECORE_TIMEOUT");
	case ECORE_INVAL:
		return ("ECORE_INVAL");
	case ECORE_BUSY:
		return ("ECORE_BUSY");
	case ECORE_NOMEM:
		return ("ECORE_NOMEM");
	case ECORE_SUCCESS:
		return ("ECORE_SUCCESS");
	case ECORE_PENDING:
		return ("ECORE_PENDING");
	default:
		return ("ECORE ERROR CODE NOT FOUND!");
	}
}

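/*
 * The chip-name lookup below keys off the low 16 bits of the ecore
 * chip number; any higher-order bits are masked off first.
 */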
#define QEDE_CHIP_NUM(_p)\
 (((_p)->edev.chip_num) & 0xffff)

char *
qede_chip_name(qede_t *qede)
{
	switch (QEDE_CHIP_NUM(qede)) {
	case 0x1634:
		return ("BCM57980E");

	case 0x1629:
		return ("BCM57980S");

	case 0x1630:
		return ("BCM57940_KR2");

	case 0x8070:
		return ("ARROWHEAD");

	case 0x8071:
		return ("ARROWHEAD");

	case 0x8072:
		return ("ARROWHEAD");

	case 0x8073:
		return ("ARROWHEAD");

	default:
		return ("UNKNOWN");
	}
}

static void
qede_destroy_locks(qede_t *qede)
{
	qede_fastpath_t *fp = &qede->fp_array[0];
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_ring;
	int i, j;

	mutex_destroy(&qede->drv_lock);
	mutex_destroy(&qede->watch_lock);

	for (i = 0; i < qede->num_fp; i++, fp++) {
		mutex_destroy(&fp->fp_lock);

		rx_ring = fp->rx_ring;
		mutex_destroy(&rx_ring->rx_lock);
		mutex_destroy(&rx_ring->rx_replen_lock);

		for (j = 0; j < qede->num_tc; j++) {
			tx_ring = fp->tx_ring[j];
			mutex_destroy(&tx_ring->tx_lock);
		}
	}
	mutex_destroy(&qede->gld_lock);
	mutex_destroy(&qede->kstat_lock);
}

static void
qede_init_locks(qede_t *qede)
{
	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
	qede_fastpath_t *fp = &qede->fp_array[0];
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_ring;
	int i, tc;

	mutex_init(&qede->drv_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
	mutex_init(&qede->watch_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));

	for (i = 0; i < qede->num_fp; i++, fp++) {
		mutex_init(&fp->fp_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));

		rx_ring = fp->rx_ring;
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
		mutex_init(&rx_ring->rx_replen_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));

		for (tc = 0; tc < qede->num_tc; tc++) {
			tx_ring = fp->tx_ring[tc];
			mutex_init(&tx_ring->tx_lock, NULL,
			    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
		}
	}

	mutex_init(&qede->gld_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
	mutex_init(&qede->kstat_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
}

/* LINTED E_FUNC_ARG_UNUSED */
static void
qede_free_io_structs(qede_t *qede)
{
}

static int
qede_alloc_io_structs(qede_t *qede)
{
	qede_fastpath_t *fp;
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_array, *tx_ring;
	int i, tc;

	/*
	 * Put rx ring + tx ring pointers paired
	 * into the fp data structure array
	 */
	for (i = 0; i < qede->num_fp; i++) {
		fp = &qede->fp_array[i];
		rx_ring = &qede->rx_array[i];

		for (tc = 0; tc < qede->num_tc; tc++) {
			tx_array = qede->tx_array[tc];
			tx_ring = &tx_array[i];
			fp->tx_ring[tc] = tx_ring;
		}

		fp->rx_ring = rx_ring;
		rx_ring->group_index = 0;
	}

	return (DDI_SUCCESS);
}

static int
qede_get_config_params(qede_t *qede)
{
	struct ecore_dev *edev = &qede->edev;

	qede_cfg_init(qede);

	qede->num_tc = DEFAULT_TRFK_CLASS_COUNT;
	qede->num_hwfns = edev->num_hwfns;
	qede->rx_buf_count = qede->rx_ring_size;
	qede->rx_buf_size = DEFAULT_RX_BUF_SIZE;
	qede_print("!%s:%d: qede->num_fp = %d\n", __func__, qede->instance,
	    qede->num_fp);
	qede_print("!%s:%d: qede->rx_ring_size = %d\n", __func__,
	    qede->instance, qede->rx_ring_size);
	qede_print("!%s:%d: qede->rx_buf_count = %d\n", __func__,
	    qede->instance, qede->rx_buf_count);
	qede_print("!%s:%d: qede->rx_buf_size = %d\n", __func__,
	    qede->instance, qede->rx_buf_size);
	qede_print("!%s:%d: qede->rx_copy_threshold = %d\n", __func__,
	    qede->instance, qede->rx_copy_threshold);
	qede_print("!%s:%d: qede->tx_ring_size = %d\n", __func__,
	    qede->instance, qede->tx_ring_size);
	qede_print("!%s:%d: qede->tx_bcopy_threshold = %d\n", __func__,
	    qede->instance, qede->tx_bcopy_threshold);
	qede_print("!%s:%d: qede->lso_enable = %d\n", __func__,
	    qede->instance, qede->lso_enable);
	qede_print("!%s:%d: qede->lro_enable = %d\n", __func__,
	    qede->instance, qede->lro_enable);
	qede_print("!%s:%d: qede->jumbo_enable = %d\n", __func__,
	    qede->instance, qede->jumbo_enable);
	qede_print("!%s:%d: qede->log_enable = %d\n", __func__,
	    qede->instance, qede->log_enable);
	qede_print("!%s:%d: qede->checksum = %d\n", __func__,
	    qede->instance, qede->checksum);
	qede_print("!%s:%d: qede->ecore_debug_level = 0x%x\n", __func__,
	    qede->instance, qede->ecore_debug_level);
	qede_print("!%s:%d: qede->num_hwfns = %d\n", __func__,
	    qede->instance, qede->num_hwfns);

	//qede->tx_buf_size = qede->mtu + QEDE_MAX_ETHER_HDR;
	qede->tx_buf_size = BUF_2K_SIZE;
	return (DDI_SUCCESS);
}

void
qede_config_debug(qede_t *qede)
{
	struct ecore_dev *edev = &qede->edev;
	u32 dp_level = 0;
	u8 dp_module = 0;

	dp_level = qede->ecore_debug_level;
	dp_module = qede->ecore_debug_module;
	ecore_init_dp(edev, dp_module, dp_level, NULL);
}


static int
qede_set_operating_params(qede_t *qede)
{
	int status = 0;
	qede_intr_context_t *intr_ctx = &qede->intr_ctx;

	/* Get qede.conf parameters from the user */
	status = qede_get_config_params(qede);
	if (status != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	/* Configure the debug level */
	qede_config_debug(qede);

	intr_ctx->intr_vect_to_request =
	    qede->num_fp + qede->num_hwfns;
	intr_ctx->intr_fp_vector_count = qede->num_fp - qede->num_hwfns;

	/* Set the maximum size of the unicast address list */
	qede->ucst_total = QEDE_MAX_UCST_CNT;
	qede->ucst_avail = QEDE_MAX_UCST_CNT;
	bzero(&qede->ucst_mac[0], sizeof (qede_mac_addr_t) * qede->ucst_total);
	qede->params.multi_promisc_fl = B_FALSE;
	qede->params.promisc_fl = B_FALSE;
	qede->mc_cnt = 0;
	qede->rx_low_buffer_threshold = RX_LOW_BUFFER_THRESHOLD;

	return (status);
}

/*
 * Resume the interface.  Resume is not currently supported, so this
 * always fails.
 */
static int
qede_resume(qede_t *qede)
{
	mutex_enter(&qede->drv_lock);
	cmn_err(CE_NOTE, "%s:%d Enter\n", __func__, qede->instance);
	qede->qede_state = QEDE_STATE_ATTACHED;
	mutex_exit(&qede->drv_lock);
	return (DDI_FAILURE);
}

/*
 * Write a dword to the doorbell from the tx path.
 * Avoids use of the qede_t pointer.
 */
void
qede_bar2_write32_tx_doorbell(qede_tx_ring_t *tx_ring, u32 val)
{
	u64 addr = (u64)tx_ring->doorbell_addr;
	ddi_put32(tx_ring->doorbell_handle, (u32 *)addr, val);
}
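
/*
 * Design note (an observation, not from the original sources): keeping
 * this helper down to an access handle plus a precomputed doorbell
 * address means the hot transmit path never has to chase pointers back
 * through qede_t; everything it needs lives in the qede_tx_ring_t.
 */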

static void
qede_unconfig_pci(qede_t *qede)
{
	if (qede->doorbell_handle != NULL) {
		ddi_regs_map_free(&(qede->doorbell_handle));
		qede->doorbell_handle = NULL;
	}

	if (qede->regs_handle != NULL) {
		ddi_regs_map_free(&qede->regs_handle);
		qede->regs_handle = NULL;
	}
	if (qede->pci_cfg_handle != NULL) {
		pci_config_teardown(&qede->pci_cfg_handle);
		qede->pci_cfg_handle = NULL;
	}
}


static int
qede_config_pci(qede_t *qede)
{
	int ret;

	ret = pci_config_setup(qede->dip, &qede->pci_cfg_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s:%d Failed to get PCI config handle\n",
		    __func__, qede->instance);
		return (DDI_FAILURE);
	}

	/* get register size */
	ret = ddi_dev_regsize(qede->dip, 1, &qede->regview_size);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0",
		    __func__, qede->instance);
		goto err_exit;
	}

	/* get doorbell size */
	ret = ddi_dev_regsize(qede->dip, 3, &qede->doorbell_size);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to read doorbell size for bar2",
		    __func__, qede->instance);
		goto err_exit;
	}

	/* map register space */
	ret = ddi_regs_map_setup(
	/* Pointer to the device's dev_info structure. */
	    qede->dip,
	/*
	 * Index number to the register address space set.
	 * A value of 0 indicates PCI configuration space,
	 * while a value of 1 indicates the real start of
	 * device register sets.
	 */
	    1,
	/*
	 * A platform-dependent value that, when added to
	 * an offset that is less than or equal to the len
	 * parameter (see below), is used for the dev_addr
	 * argument to the ddi_get, ddi_mem_get, and
	 * ddi_io_get/put routines.
	 */
	    &qede->regview,
	/*
	 * Offset into the register address space.
	 */
	    0,
	/* Length to be mapped. */
	    qede->regview_size,
	/*
	 * Pointer to a device access attribute structure
	 * of this mapping.
	 */
	    &qede_regs_acc_attr,
	/* Pointer to a data access handle. */
	    &qede->regs_handle);

	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!qede(%d): failed to map registers, err %d",
		    qede->instance, ret);
		goto err_exit;
	}

	qede->pci_bar0_base = (unsigned long)qede->regview;

	/* map doorbell space */
	ret = ddi_regs_map_setup(qede->dip,
	    2,
	    &qede->doorbell,
	    0,
	    qede->doorbell_size,
	    &qede_regs_acc_attr,
	    &qede->doorbell_handle);

	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qede%d: failed to map doorbell, err %d",
		    qede->instance, ret);
		goto err_exit;
	}

	qede->pci_bar2_base = (unsigned long)qede->doorbell;

	return (ret);
err_exit:
	qede_unconfig_pci(qede);
	return (DDI_FAILURE);
}

static uint_t
qede_sp_handler(caddr_t arg1, caddr_t arg2)
{
	/*LINTED E_BAD_PTR_CAST_ALIGN*/
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)arg1;
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	qede_vector_info_t *vect_info = (qede_vector_info_t *)arg2;
	struct ecore_dev *edev;
	qede_t *qede;

	if ((arg1 == NULL) || (arg2 == NULL)) {
		cmn_err(CE_WARN, "qede_sp_handler: invalid parameters");
		/*
		 * MSIX intr should always
		 * return DDI_INTR_CLAIMED
		 */
		return (DDI_INTR_CLAIMED);
	}

	/* Only dereference the arguments once they are known non-NULL */
	edev = p_hwfn->p_dev;
	qede = (qede_t *)edev;

	vect_info->in_isr = B_TRUE;

	atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
	qede->intrSbCnt[vect_info->vect_index]++;

	ecore_int_sp_dpc((osal_int_ptr_t)p_hwfn);

	vect_info->in_isr = B_FALSE;

	return (DDI_INTR_CLAIMED);
}

void
qede_enable_hw_intr(qede_fastpath_t *fp)
{
	ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
	ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
}

void
qede_disable_hw_intr(qede_fastpath_t *fp)
{
	ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
	ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
}
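
/*
 * The two helpers above are mirror images: enabling acks the IGU and
 * then syncs the status block for the device, while disabling syncs it
 * for the kernel before acking, so that each side sees a consistent
 * status-block view across the transition.  They are called with
 * qede->drv_lock held from the fastpath handler below and, judging by
 * fp->disabled_by_poll, from a ring poll enable/disable path elsewhere
 * in the driver.
 */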

static uint_t
qede_fp_handler(caddr_t arg1, caddr_t arg2)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	qede_vector_info_t *vect_info = (qede_vector_info_t *)arg1;
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	qede_t *qede = (qede_t *)arg2;
	qede_fastpath_t *fp;
	qede_rx_ring_t *rx_ring;
	mblk_t *mp;
	int work_done = 0;

	if ((vect_info == NULL) || (vect_info->fp == NULL)) {
		cmn_err(CE_WARN, "qede_fp_handler: invalid parameters");
		return (DDI_INTR_UNCLAIMED);
	}

	fp = (qede_fastpath_t *)vect_info->fp;
	rx_ring = fp->rx_ring;

	mutex_enter(&fp->fp_lock);

	atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
	qede->intrSbCnt[vect_info->vect_index]++;

	mutex_enter(&fp->qede->drv_lock);
	qede_disable_hw_intr(fp);
	mutex_exit(&fp->qede->drv_lock);

	mp = qede_process_fastpath(fp, QEDE_POLL_ALL,
	    QEDE_MAX_RX_PKTS_PER_INTR, &work_done);

	if (mp)
#ifndef NO_CROSSBOW
	{
		mac_rx_ring(rx_ring->qede->mac_handle,
		    rx_ring->mac_ring_handle,
		    mp,
		    rx_ring->mr_gen_num);
	}
#else
	{
		mac_rx(qede->mac_handle, NULL, mp);
	}
#endif
	else if (work_done == 0) {
		qede->intrSbNoChangeCnt[vect_info->vect_index]++;
	}

	mutex_enter(&fp->qede->drv_lock);
	/*
	 * The mac layer may have disabled interrupts in the context of
	 * the mac_rx_ring() call above while preparing for the poll
	 * process.  In that case we do not want to re-enable them here.
	 */
	if (fp->disabled_by_poll == 0) {
		qede_enable_hw_intr(fp);
	}
	mutex_exit(&fp->qede->drv_lock);

	mutex_exit(&fp->fp_lock);

	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

static int
qede_disable_intr(qede_t *qede, uint32_t index)
{
	int status;
	qede_intr_context_t *intr_ctx = &qede->intr_ctx;

	status = ddi_intr_disable(intr_ctx->intr_hdl_array[index]);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_disable with %s"
		    " for index %d\n",
		    __func__, qede_get_ddi_fail(status), index);
		return (status);
	}
	atomic_and_32(&intr_ctx->intr_state, ~(1 << index));

	return (status);
}

static int
qede_enable_intr(qede_t *qede, int index)
{
	int status = 0;

	qede_intr_context_t *intr_ctx = &qede->intr_ctx;

	status = ddi_intr_enable(intr_ctx->intr_hdl_array[index]);

	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_enable with %s"
		    " for index %d\n",
		    __func__, qede_get_ddi_fail(status), index);
		return (status);
	}

	atomic_or_32(&intr_ctx->intr_state, (1 << index));

	return (status);
}

static int
qede_disable_all_fastpath_intrs(qede_t *qede)
{
	int i, status;

	for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
		status = qede_disable_intr(qede, i);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (DDI_SUCCESS);
}

static int
qede_enable_all_fastpath_intrs(qede_t *qede)
{
	int status = 0, i;

	for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
		status = qede_enable_intr(qede, i);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (DDI_SUCCESS);
}

static int
qede_disable_slowpath_intrs(qede_t *qede)
{
	int i, status;

	for (i = 0; i < qede->num_hwfns; i++) {
		status = qede_disable_intr(qede, i);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (DDI_SUCCESS);
}

static int
qede_enable_slowpath_intrs(qede_t *qede)
{
	int i, status;

	for (i = 0; i < qede->num_hwfns; i++) {
		status = qede_enable_intr(qede, i);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (DDI_SUCCESS);
}

static int
qede_prepare_edev(qede_t *qede)
{
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hw_prepare_params p_params;

	/*
	 * Set up the bar0 and bar2 base addresses
	 * in the ecore_dev
	 */
	edev->regview = (void *)qede->regview;
	edev->doorbells = (void *)qede->doorbell;

	/* LINTED E_FUNC_RET_MAYBE_IGNORED2 */
	strcpy(edev->name, qede->name);
	ecore_init_struct(edev);

	p_params.personality = ECORE_PCI_ETH;
	p_params.drv_resc_alloc = 0;
	p_params.chk_reg_fifo = 1;
	p_params.initiate_pf_flr = 1;
	//p_params->epoch = time(&epoch);
	p_params.allow_mdump = 1;
	p_params.b_relaxed_probe = 0;
	return (ecore_hw_prepare(edev, &p_params));
}

static int
qede_config_edev(qede_t *qede)
{
	int status, i;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_pf_params *params;

	for (i = 0; i < qede->num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
		params = &p_hwfn->pf_params;
		memset((void *)params, 0, sizeof (struct ecore_pf_params));
		params->eth_pf_params.num_cons = 32;
	}
	status = ecore_resc_alloc(edev);
	if (status != ECORE_SUCCESS) {
		cmn_err(CE_NOTE, "%s: Could not allocate ecore resources\n",
		    __func__);
		return (DDI_ENOMEM);
	}
	ecore_resc_setup(edev);
	return (DDI_SUCCESS);
}

static void
qede_unconfig_intrs(qede_t *qede)
{
	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
	qede_vector_info_t *vect_info;
	int i, status = 0;

	for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
		vect_info = &intr_ctx->intr_vect_info[i];
		if (vect_info->handler_added == B_TRUE) {
			status = ddi_intr_remove_handler(
			    intr_ctx->intr_hdl_array[i]);
			if (status != DDI_SUCCESS) {
				cmn_err(CE_WARN, "qede:%s: Failed"
				    " ddi_intr_remove_handler with %s"
				    " for index %d\n",
				    __func__, qede_get_ddi_fail(status), i);
			}

			(void) ddi_intr_free(intr_ctx->intr_hdl_array[i]);

			vect_info->handler_added = B_FALSE;
			intr_ctx->intr_hdl_array[i] = NULL;
		}
	}
}

static int
qede_config_intrs(qede_t *qede)
{
	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
	qede_vector_info_t *vect_info;
	struct ecore_dev *edev = &qede->edev;
	int i, status = DDI_FAILURE;
	ddi_intr_handler_t *handler;
	void *arg1, *arg2;

	/*
	 * Set up the interrupt handler arguments
	 * for the slowpath and fastpath vectors
	 */
	for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
		vect_info = &intr_ctx->intr_vect_info[i];
		/* Store the table index */
		vect_info->vect_index = i;
		vect_info->qede = qede;
		/*
		 * Store the interrupt handler's argument.
		 * This will be a pointer to ecore_dev->hwfns for
		 * slowpath vectors and a pointer to the fastpath
		 * structure for fastpath vectors.
		 */
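		/*
		 * Vector layout (derived from the indexing below): the
		 * first num_hwfns vectors service the slowpath, one per
		 * hardware function, and the remaining vectors service
		 * the fastpaths; e.g. with num_hwfns = 1 and num_fp = 4,
		 * vector 0 is slowpath and vectors 1-4 map to
		 * fp_array[0..3].
		 */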
		if (i < qede->num_hwfns) {
			vect_info->fp = (void *)&edev->hwfns[i];
			handler = qede_sp_handler;
			arg1 = (caddr_t)&qede->edev.hwfns[i];
			arg2 = (caddr_t)vect_info;
		} else {
			/*
			 * The loop index includes the hwfns,
			 * so they need to be subtracted
			 * when indexing fp_array
			 */
			vect_info->fp =
			    (void *)&qede->fp_array[i - qede->num_hwfns];
			handler = qede_fp_handler;
			arg1 = (caddr_t)vect_info;
			arg2 = (caddr_t)qede;
		}

		status = ddi_intr_add_handler(
		    intr_ctx->intr_hdl_array[i],
		    handler,
		    arg1,
		    arg2);
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN, "qede:%s: Failed"
			    " ddi_intr_add_handler with %s"
			    " for index %d\n",
			    __func__, qede_get_ddi_fail(status), i);
			qede_unconfig_intrs(qede);
			return (DDI_FAILURE);
		}
		vect_info->handler_added = B_TRUE;
	}

	return (status);
}

static void
qede_free_intrs(qede_t *qede)
{
	qede_intr_context_t *intr_ctx;
	int i, status;

	ASSERT(qede != NULL);
	intr_ctx = &qede->intr_ctx;
	ASSERT(intr_ctx != NULL);

	if (intr_ctx->intr_hdl_array) {
		for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
			if (intr_ctx->intr_hdl_array[i]) {
				status =
				    ddi_intr_free(intr_ctx->intr_hdl_array[i]);
				if (status != DDI_SUCCESS) {
					cmn_err(CE_NOTE,
					    "qede:%s: Failed ddi_intr_free"
					    " with %s\n",
					    __func__,
					    qede_get_ddi_fail(status));
				}
			}
		}

		kmem_free(intr_ctx->intr_hdl_array,
		    intr_ctx->intr_hdl_array_size);
		intr_ctx->intr_hdl_array = NULL;
	}

	if (intr_ctx->intr_vect_info) {
		kmem_free(intr_ctx->intr_vect_info,
		    intr_ctx->intr_vect_info_array_size);
		intr_ctx->intr_vect_info = NULL;
	}
}

static int
qede_alloc_intrs(qede_t *qede)
{
	int status, type_supported, num_supported;
	int actual, num_available, num_to_request;
	dev_info_t *dip;
	qede_intr_context_t *intr_ctx = &qede->intr_ctx;

	dip = qede->dip;

	status = ddi_intr_get_supported_types(dip, &type_supported);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "qede:%s: Failed ddi_intr_get_supported_types with %s\n",
		    __func__, qede_get_ddi_fail(status));
		return (status);
	}
	intr_ctx->intr_types_available = type_supported;

	if (type_supported & DDI_INTR_TYPE_MSIX) {
		intr_ctx->intr_type_in_use = DDI_INTR_TYPE_MSIX;

		/*
		 * Get the total number of vectors
		 * supported by the device
		 */
		status = ddi_intr_get_nintrs(qede->dip,
		    DDI_INTR_TYPE_MSIX, &num_supported);
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "qede:%s: Failed ddi_intr_get_nintrs with %s\n",
			    __func__, qede_get_ddi_fail(status));
			return (status);
		}
		intr_ctx->intr_vect_supported = num_supported;

		/*
		 * Get the total number of vectors
		 * available for this instance
		 */
		status = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX,
		    &num_available);
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "qede:%s: Failed ddi_intr_get_navail with %s\n",
			    __func__, qede_get_ddi_fail(status));
			return (status);
		}

		if ((num_available < intr_ctx->intr_vect_to_request) &&
		    (num_available >= 2)) {
			qede->num_fp = num_available - qede->num_hwfns;
			cmn_err(CE_NOTE,
			    "qede:%s: allocated %d interrupts"
			    " requested was %d\n",
			    __func__, num_available,
			    intr_ctx->intr_vect_to_request);
			intr_ctx->intr_vect_to_request = num_available;
		} else if (num_available < 2) {
			cmn_err(CE_WARN,
			    "qede:%s: Only %d MSI-X vectors available,"
			    " at least 2 are required\n",
			    __func__, num_available);
			return (DDI_FAILURE);
		}

		intr_ctx->intr_vect_available = num_available;
		num_to_request = intr_ctx->intr_vect_to_request;
		intr_ctx->intr_hdl_array_size = num_to_request *
		    sizeof (ddi_intr_handle_t);
		intr_ctx->intr_vect_info_array_size = num_to_request *
		    sizeof (qede_vector_info_t);

		/* Allocate arrays big enough for all requested vectors */
		intr_ctx->intr_hdl_array = kmem_zalloc(
		    intr_ctx->intr_hdl_array_size, KM_SLEEP);

		intr_ctx->intr_vect_info = kmem_zalloc(
		    intr_ctx->intr_vect_info_array_size, KM_SLEEP);

		/*
		 * Use strict allocation.  It will fail if we do not get
		 * exactly what we want.  Later we could fall back through
		 * powers of two like this:
		 *   for (i = intr_ctx->intr_requested; i > 0; i >>= 1)
		 * (though we would need to account for the slowpath vector)
		 */
		status = ddi_intr_alloc(qede->dip,
		    intr_ctx->intr_hdl_array,
		    DDI_INTR_TYPE_MSIX,
		    0,
		    num_to_request,
		    &actual,
		    DDI_INTR_ALLOC_STRICT);
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "qede:%s: Failed to allocate"
			    " %d interrupts with %s\n",
			    __func__, num_to_request,
			    qede_get_ddi_fail(status));
			cmn_err(CE_WARN,
			    "qede:%s: Only %d interrupts available.\n",
			    __func__, actual);
			goto err_exit;
		}
		intr_ctx->intr_vect_allocated = num_to_request;

		status = ddi_intr_get_pri(intr_ctx->intr_hdl_array[0],
		    &intr_ctx->intr_pri);
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "qede:%s: Failed ddi_intr_get_pri with %s\n",
			    __func__, qede_get_ddi_fail(status));
			goto err_exit;
		}

		status = ddi_intr_get_cap(intr_ctx->intr_hdl_array[0],
		    &intr_ctx->intr_cap);
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "qede:%s: Failed ddi_intr_get_cap with %s\n",
			    __func__, qede_get_ddi_fail(status));
			goto err_exit;
		}
	} else {
		/* For now we only support type MSIX */
		cmn_err(CE_WARN,
		    "qede:%s: Device does not support MSI-X interrupts\n",
		    __func__);
		return (DDI_FAILURE);
	}

	intr_ctx->intr_mode = ECORE_INT_MODE_MSIX;
	return (status);
err_exit:
	qede_free_intrs(qede);
	return (status);
}

static void
/* LINTED E_FUNC_ARG_UNUSED */
qede_unconfig_fm(qede_t *qede)
{
}

/* LINTED E_FUNC_ARG_UNUSED */
static int
qede_fm_err_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static int
qede_config_fm(qede_t *qede)
{
	ddi_iblock_cookie_t iblk;

	cmn_err(CE_NOTE, "Entered qede_config_fm\n");
	qede_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	qede_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	qede_buf_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	qede_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_gen_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_tx_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_dma_attr_rxbuf.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_dma_attr_cmddesc.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_gen_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
	qede_buf2k_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;

	ddi_fm_init(qede->dip, &qede->fm_cap, &iblk);

	if (DDI_FM_EREPORT_CAP(qede->fm_cap) ||
	    DDI_FM_ERRCB_CAP(qede->fm_cap)) {
		pci_ereport_setup(qede->dip);
	}

	if (DDI_FM_ERRCB_CAP(qede->fm_cap)) {
		ddi_fm_handler_register(qede->dip,
		    qede_fm_err_cb, (void *)qede);
	}
	return (DDI_SUCCESS);
}

int
qede_dma_mem_alloc(qede_t *qede,
    int size, uint_t dma_flags, caddr_t *address, ddi_dma_cookie_t *cookie,
    ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep,
    ddi_dma_attr_t *dma_attr, ddi_device_acc_attr_t *dev_acc_attr)
{
	int err;
	uint32_t ncookies;
	size_t ring_len;

	*dma_handle = NULL;

	if (size <= 0) {
		return (DDI_ENOMEM);
	}

	err = ddi_dma_alloc_handle(qede->dip,
	    dma_attr,
	    DDI_DMA_DONTWAIT, NULL, dma_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
		    "ddi_dma_alloc_handle FAILED: %d", qede->instance, err);
		*dma_handle = NULL;
		return (DDI_ENOMEM);
	}

	err = ddi_dma_mem_alloc(*dma_handle,
	    size, dev_acc_attr,
	    dma_flags,
	    DDI_DMA_DONTWAIT, NULL, address, &ring_len,
	    handlep);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
		    "ddi_dma_mem_alloc FAILED: %d, request size: %d",
		    qede->instance, err, size);
		ddi_dma_free_handle(dma_handle);
		*dma_handle = NULL;
		*handlep = NULL;
		return (DDI_ENOMEM);
	}

	if (ring_len < size) {
		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
		    "could not allocate required: %d, request size: %d",
		    qede->instance, (int)ring_len, size);
		ddi_dma_mem_free(handlep);
		ddi_dma_free_handle(dma_handle);
		*dma_handle = NULL;
		*handlep = NULL;
		return (DDI_FAILURE);
	}

	(void) memset(*address, 0, size);

	if (((err = ddi_dma_addr_bind_handle(*dma_handle,
	    NULL, *address, ring_len,
	    dma_flags,
	    DDI_DMA_DONTWAIT, NULL,
	    cookie, &ncookies)) != DDI_DMA_MAPPED) ||
	    (ncookies != 1)) {
		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
		    "ddi_dma_addr_bind_handle Failed: %d",
		    qede->instance, err);
		ddi_dma_mem_free(handlep);
		ddi_dma_free_handle(dma_handle);
		*dma_handle = NULL;
		*handlep = NULL;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
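
/*
 * A sketch of a typical call (illustrative only; the actual callers
 * live in the ring-setup code elsewhere in the driver):
 *
 *	ddi_dma_cookie_t cookie;
 *	ddi_dma_handle_t dma_hdl;
 *	ddi_acc_handle_t acc_hdl;
 *	caddr_t va;
 *
 *	if (qede_dma_mem_alloc(qede, 0x1000,
 *	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, &va, &cookie,
 *	    &dma_hdl, &acc_hdl, &qede_dma_attr_desc,
 *	    &qede_desc_acc_attr) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * On success, cookie.dmac_laddress holds the single device-visible
 * address (the descriptor attributes force sgllen == 1).
 */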

void
qede_pci_free_consistent(ddi_dma_handle_t *dma_handle,
    ddi_acc_handle_t *acc_handle)
{
	int err;

	if (*dma_handle != NULL) {
		err = ddi_dma_unbind_handle(*dma_handle);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!pci_free_consistent: "
			    "Error unbinding memory, err %d", err);
			return;
		}
	} else {
		goto exit;
	}
	ddi_dma_mem_free(acc_handle);
	ddi_dma_free_handle(dma_handle);
exit:
	*dma_handle = NULL;
	*acc_handle = NULL;
}

static int
qede_vport_stop(qede_t *qede)
{
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *p_hwfn;
	int i, status = ECORE_BUSY;

	for (i = 0; i < edev->num_hwfns; i++) {
		p_hwfn = &edev->hwfns[i];

		if (qede->vport_state[i] != QEDE_VPORT_STARTED) {
			qede_info(qede, "vport %d not started", i);
			continue;
		}

		status = ecore_sp_vport_stop(p_hwfn,
		    p_hwfn->hw_info.opaque_fid,
		    i); /* vport needs fix */
		if (status != ECORE_SUCCESS) {
			cmn_err(CE_WARN, "!qede_vport_stop: "
			    "FAILED for hwfn%d ", i);
			return (DDI_FAILURE);
		}
		cmn_err(CE_NOTE, "!qede_vport_stop: "
		    "SUCCESS for hwfn%d ", i);

		qede->vport_state[i] = QEDE_VPORT_STOPPED;
	}

	return (status);
}

static uint8_t
qede_get_active_rss_params(qede_t *qede, u8 hwfn_id)
{
	struct ecore_rss_params rss_params;
	qede_fastpath_t *fp;
	int i;
	const uint64_t hash_key[] =
	{
		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
		0x255b0ec26d5a56daULL
	};
	uint8_t enable_rss = 0;

	bzero(&rss_params, sizeof (rss_params));
	if (qede->num_fp > 1) {
		qede_info(qede, "Configuring RSS parameters");
		enable_rss = 1;
	} else {
		qede_info(qede, "RSS configuration not needed");
		enable_rss = 0;
		goto exit;
	}

	rss_params.update_rss_config = 1;
	rss_params.rss_enable = 1;
	rss_params.update_rss_capabilities = 1;
	rss_params.update_rss_ind_table = 1;
	rss_params.update_rss_key = 1;

	rss_params.rss_caps = ECORE_RSS_IPV4 |
	    ECORE_RSS_IPV6 |
	    ECORE_RSS_IPV4_TCP |
	    ECORE_RSS_IPV6_TCP |
	    ECORE_RSS_IPV4_UDP |
	    ECORE_RSS_IPV6_UDP;

	rss_params.rss_table_size_log = 7; /* 2^7 = 128 */

	bcopy(&hash_key[0], &rss_params.rss_key[0],
	    sizeof (rss_params.rss_key));

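	/*
	 * Spread the 128-entry indirection table across the active rx
	 * queues round-robin; e.g. with num_fp = 4 the entries map to
	 * queues 0, 1, 2, 3, 0, 1, ...
	 */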
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		fp = &qede->fp_array[i % qede->num_fp];
		rss_params.rss_ind_table[i] = (void *)(fp->rx_ring->p_cid);
	}
exit:
	bcopy(&rss_params, &qede->rss_params[hwfn_id], sizeof (rss_params));
	return (enable_rss);
}

static int
qede_vport_update(qede_t *qede,
    enum qede_vport_state state)
{
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *p_hwfn;
	struct ecore_sp_vport_update_params *vport_params;
	struct ecore_sge_tpa_params tpa_params;
	int status = DDI_SUCCESS;
	bool new_state;
	uint8_t i;

	cmn_err(CE_NOTE, "qede_vport_update: "
	    "Enter, state = %s%s%s%s%s",
	    state == QEDE_VPORT_STARTED ? "QEDE_VPORT_STARTED" : "",
	    state == QEDE_VPORT_ON ? "QEDE_VPORT_ON" : "",
	    state == QEDE_VPORT_OFF ? "QEDE_VPORT_OFF" : "",
	    state == QEDE_VPORT_STOPPED ? "QEDE_VPORT_STOPPED" : "",
	    state == QEDE_VPORT_UNKNOWN ? "" : "");

	/*
	 * Update only handles on and off.  For now we drive TX and RX
	 * together; later they can be split and other params set here
	 * as well.
	 */
	if (state == QEDE_VPORT_ON) {
		new_state = true;
	} else if (state == QEDE_VPORT_OFF) {
		new_state = false;
	} else {
		cmn_err(CE_WARN, "qede_vport_update: "
		    "invalid, state = %d", state);
		return (DDI_EINVAL);
	}

	for (i = 0; i < edev->num_hwfns; i++) {
		p_hwfn = &edev->hwfns[i];
		vport_params = &qede->vport_params[i];

		vport_params->opaque_fid = p_hwfn->hw_info.opaque_fid;
		vport_params->vport_id = i;

		vport_params->update_vport_active_rx_flg = 1;
		if (new_state)
			vport_params->vport_active_rx_flg = 1;
		else
			vport_params->vport_active_rx_flg = 0;

		vport_params->update_vport_active_tx_flg = 1;
		if (new_state)
			vport_params->vport_active_tx_flg = 1;
		else
			vport_params->vport_active_tx_flg = 0;

		vport_params->update_inner_vlan_removal_flg = 0;
		vport_params->inner_vlan_removal_flg = 0;
		vport_params->update_default_vlan_enable_flg = 0;
		vport_params->default_vlan_enable_flg = 0;
		vport_params->update_default_vlan_flg = 1;
		vport_params->default_vlan = 0;
		vport_params->update_tx_switching_flg = 0;
		vport_params->tx_switching_flg = 0;
		vport_params->update_approx_mcast_flg = 0;
		vport_params->update_anti_spoofing_en_flg = 0;
		vport_params->anti_spoofing_en = 0;
		vport_params->update_accept_any_vlan_flg = 1;
		vport_params->accept_any_vlan = 1;

		vport_params->accept_flags.update_rx_mode_config = 1;
		vport_params->accept_flags.update_tx_mode_config = 1;
		vport_params->accept_flags.rx_accept_filter =
		    ECORE_ACCEPT_BCAST |
		    ECORE_ACCEPT_UCAST_UNMATCHED |
		    ECORE_ACCEPT_MCAST_UNMATCHED;
		vport_params->accept_flags.tx_accept_filter =
		    ECORE_ACCEPT_BCAST |
		    ECORE_ACCEPT_UCAST_UNMATCHED |
		    ECORE_ACCEPT_MCAST_UNMATCHED;

		vport_params->sge_tpa_params = NULL;

		if (qede->lro_enable && new_state) {
			qede_print("!%s(%d): enabling LRO ",
			    __func__, qede->instance);

			memset(&tpa_params, 0,
			    sizeof (struct ecore_sge_tpa_params));
			tpa_params.max_buffers_per_cqe = 5;
			tpa_params.update_tpa_en_flg = 1;
			tpa_params.tpa_ipv4_en_flg = 1;
			tpa_params.tpa_ipv6_en_flg = 1;
			tpa_params.tpa_ipv4_tunn_en_flg = 0;
			tpa_params.tpa_ipv6_tunn_en_flg = 0;
			tpa_params.update_tpa_param_flg = 1;
			tpa_params.tpa_pkt_split_flg = 0;
			tpa_params.tpa_hdr_data_split_flg = 0;
			tpa_params.tpa_gro_consistent_flg = 0;
			tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
			tpa_params.tpa_max_size = 65535;
			tpa_params.tpa_min_size_to_start = qede->mtu / 2;
			tpa_params.tpa_min_size_to_cont = qede->mtu / 2;
			vport_params->sge_tpa_params = &tpa_params;
		}

		/*
		 * Get the rss_params to be configured
		 */
		if (qede_get_active_rss_params(qede, i /* hwfn id */)) {
			vport_params->rss_params = &qede->rss_params[i];
		} else {
			vport_params->rss_params = NULL;
		}

		status = ecore_sp_vport_update(p_hwfn,
		    vport_params,
		    ECORE_SPQ_MODE_EBLOCK,
		    NULL);

		if (status != ECORE_SUCCESS) {
			cmn_err(CE_WARN, "ecore_sp_vport_update: "
			    "FAILED for hwfn%d with %s", i,
			    qede_get_ecore_fail(status));
			return (DDI_FAILURE);
		}
		cmn_err(CE_NOTE, "!ecore_sp_vport_update: "
		    "SUCCESS for hwfn%d ", i);
	}
	return (DDI_SUCCESS);
}

static int
qede_vport_start(qede_t *qede)
{
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *p_hwfn;
	struct ecore_sp_vport_start_params params;
	uint8_t i;
	int status = ECORE_BUSY;

	for (i = 0; i < edev->num_hwfns; i++) {
		p_hwfn = &edev->hwfns[i];
		if ((qede->vport_state[i] != QEDE_VPORT_UNKNOWN) &&
		    (qede->vport_state[i] != QEDE_VPORT_STOPPED)) {
			continue;
		}

		params.tpa_mode = ECORE_TPA_MODE_NONE;
		params.remove_inner_vlan = 0;
		params.tx_switching = 0;
		params.handle_ptp_pkts = 0;
		params.only_untagged = 0;
		params.drop_ttl0 = 1;
		params.max_buffers_per_cqe = 16;
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		params.vport_id = i;
		params.mtu = qede->mtu;
		status = ecore_sp_vport_start(p_hwfn, &params);
		if (status != ECORE_SUCCESS) {
			cmn_err(CE_WARN, "qede_vport_start: "
			    "FAILED for hwfn%d", i);
			return (DDI_FAILURE);
		}
		cmn_err(CE_NOTE, "!ecore_sp_vport_start: "
		    "SUCCESS for hwfn%d ", i);

		ecore_hw_start_fastpath(p_hwfn);
		qede->vport_state[i] = QEDE_VPORT_STARTED;
	}
	ecore_reset_vport_stats(edev);
	return (status);
}

void
qede_update_rx_q_producer(qede_rx_ring_t *rx_ring)
{
	u16 bd_prod = ecore_chain_get_prod_idx(&rx_ring->rx_bd_ring);
	u16 cqe_prod = ecore_chain_get_prod_idx(&rx_ring->rx_cqe_ring);
	/* LINTED E_FUNC_SET_NOT_USED */
	struct eth_rx_prod_data rx_prod_cmd = { 0 };

	rx_prod_cmd.bd_prod = HOST_TO_LE_32(bd_prod);
	rx_prod_cmd.cqe_prod = HOST_TO_LE_32(cqe_prod);
	UPDATE_RX_PROD(rx_ring, rx_prod_cmd);
}
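
/*
 * In other words (descriptive note): the function above snapshots the
 * software producer indices of the rx BD and CQE chains, converts them
 * to little-endian, and posts the pair to the hardware through the
 * UPDATE_RX_PROD() macro, telling the device how many fresh receive
 * buffers it may consume.
 */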

static int
qede_fastpath_stop_queues(qede_t *qede)
{
	int i, j;
	int status = DDI_FAILURE;
	struct ecore_dev *edev;
	struct ecore_hwfn *p_hwfn;
	struct ecore_queue_cid *p_tx_cid, *p_rx_cid;

	qede_fastpath_t *fp;
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_ring;

	ASSERT(qede != NULL);
	/* ASSERT(qede->edev != NULL); */

	edev = &qede->edev;

	status = qede_vport_update(qede, QEDE_VPORT_OFF);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "FAILED to "
		    "update vports");
		return (DDI_FAILURE);
	}

	for (i = 0; i < qede->num_fp; i++) {
		fp = &qede->fp_array[i];
		rx_ring = fp->rx_ring;
		p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
		for (j = 0; j < qede->num_tc; j++) {
			tx_ring = fp->tx_ring[j];
			if (tx_ring->queue_started == B_TRUE) {
				cmn_err(CE_WARN, "Stopping tx queue "
				    "%d:%d. ", i, j);
				p_tx_cid = tx_ring->p_cid;
				status = ecore_eth_tx_queue_stop(p_hwfn,
				    (void *)p_tx_cid);
				if (status != ECORE_SUCCESS) {
					cmn_err(CE_WARN, "FAILED to "
					    "stop tx queue %d:%d", i, j);
					return (DDI_FAILURE);
				}
				tx_ring->queue_started = B_FALSE;
				cmn_err(CE_NOTE, "tx_ring %d:%d stopped\n", i,
				    j);
			}
		}

		if (rx_ring->queue_started == B_TRUE) {
			cmn_err(CE_WARN, "Stopping rx queue "
			    "%d. ", i);
			p_rx_cid = rx_ring->p_cid;
			status = ecore_eth_rx_queue_stop(p_hwfn,
			    (void *)p_rx_cid, B_TRUE, B_FALSE);
			if (status != ECORE_SUCCESS) {
				cmn_err(CE_WARN, "FAILED to "
				    "stop rx queue %d "
				    "with ecore status %s",
				    i, qede_get_ecore_fail(status));
				return (DDI_FAILURE);
			}
			rx_ring->queue_started = B_FALSE;
			cmn_err(CE_NOTE, "rx_ring%d stopped\n", i);
		}
	}

	status = qede_vport_stop(qede);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qede_vport_stop "
		    "FAILED to stop vports");
		return (DDI_FAILURE);
	}

	ecore_hw_stop_fastpath(edev);

	return (DDI_SUCCESS);
}

static int
qede_fastpath_start_queues(qede_t *qede)
{
	int i, j;
	int status = DDI_FAILURE;
	struct ecore_dev *edev;
	struct ecore_hwfn *p_hwfn;
	struct ecore_queue_start_common_params params;
	struct ecore_txq_start_ret_params tx_ret_params;
	struct ecore_rxq_start_ret_params rx_ret_params;
	qede_fastpath_t *fp;
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_ring;
	dma_addr_t p_phys_table;
	u16 page_cnt;

	ASSERT(qede != NULL);
	/* ASSERT(qede->edev != NULL); */
	edev = &qede->edev;

	status = qede_vport_start(qede);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to "
		    "start vports");
		return (DDI_FAILURE);
	}

	for (i = 0; i < qede->num_fp; i++) {
		fp = &qede->fp_array[i];
		rx_ring = fp->rx_ring;
		p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];

		params.vport_id = fp->vport_id;
		params.queue_id = fp->rx_queue_index;
		params.stats_id = fp->stats_id;
		params.p_sb = fp->sb_info;
		params.sb_idx = RX_PI;
		p_phys_table = ecore_chain_get_pbl_phys(&rx_ring->rx_cqe_ring);
		page_cnt = ecore_chain_get_page_cnt(&rx_ring->rx_cqe_ring);

		status = ecore_eth_rx_queue_start(p_hwfn,
		    p_hwfn->hw_info.opaque_fid,
		    &params,
		    qede->rx_buf_size,
		    rx_ring->rx_bd_ring.p_phys_addr,
		    p_phys_table,
		    page_cnt,
		    &rx_ret_params);

		rx_ring->hw_rxq_prod_addr = rx_ret_params.p_prod;
		rx_ring->p_cid = rx_ret_params.p_handle;
		if (status != DDI_SUCCESS) {
			cmn_err(CE_WARN, "ecore_sp_eth_rx_queue_start "
			    "FAILED for rxq%d", i);
			return (DDI_FAILURE);
		}
		rx_ring->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];

		OSAL_MSLEEP(20);
		*rx_ring->hw_cons_ptr = 0;

		qede_update_rx_q_producer(rx_ring);
		rx_ring->queue_started = B_TRUE;
		cmn_err(CE_NOTE, "rx_ring%d started\n", i);

		for (j = 0; j < qede->num_tc; j++) {
			tx_ring = fp->tx_ring[j];

			params.vport_id = fp->vport_id;
			params.queue_id = tx_ring->tx_queue_index;
			params.stats_id = fp->stats_id;
			params.p_sb = fp->sb_info;
			params.sb_idx = TX_PI(j);

			p_phys_table = ecore_chain_get_pbl_phys(
			    &tx_ring->tx_bd_ring);
			page_cnt = ecore_chain_get_page_cnt(
			    &tx_ring->tx_bd_ring);
			status = ecore_eth_tx_queue_start(p_hwfn,
			    p_hwfn->hw_info.opaque_fid,
			    &params,
			    0,
			    p_phys_table,
			    page_cnt,
			    &tx_ret_params);
			tx_ring->doorbell_addr = tx_ret_params.p_doorbell;
			tx_ring->p_cid = tx_ret_params.p_handle;
			if (status != DDI_SUCCESS) {
				cmn_err(CE_WARN, "ecore_sp_eth_tx_queue_start "
				    "FAILED for txq%d:%d", i, j);
				return (DDI_FAILURE);
			}
			tx_ring->hw_cons_ptr =
			    &fp->sb_info->sb_virt->pi_array[TX_PI(j)];
			/* LINTED E_CONSTANT_CONDITION */
			SET_FIELD(tx_ring->tx_db.data.params,
			    ETH_DB_DATA_DEST, DB_DEST_XCM);
			/* LINTED E_CONSTANT_CONDITION */
			SET_FIELD(tx_ring->tx_db.data.params,
			    ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
			/* LINTED E_CONSTANT_CONDITION */
			SET_FIELD(tx_ring->tx_db.data.params,
			    ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD);
			tx_ring->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
			tx_ring->queue_started = B_TRUE;
			cmn_err(CE_NOTE, "tx_ring %d:%d started\n", i, j);
		}
	}

	status = qede_vport_update(qede, QEDE_VPORT_ON);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to "
		    "update vports");
		return (DDI_FAILURE);
	}
	return (status);
}

static void
qede_free_mag_elem(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer,
    struct eth_rx_bd *bd)
{
	if (bd != NULL) {
		bzero(bd, sizeof (*bd));
	}

	if (rx_buffer->mp != NULL) {
		freemsg(rx_buffer->mp);
		rx_buffer->mp = NULL;
	}
}

static void
qede_free_lro_rx_buffers(qede_rx_ring_t *rx_ring)
{
	int i, j;
	qede_lro_info_t *lro_info;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		lro_info = &rx_ring->lro_info[i];
		if (lro_info->agg_state == QEDE_AGG_STATE_NONE) {
			continue;
		}
		for (j = 0; j < QEDE_MAX_BD_PER_AGG; j++) {
			if (lro_info->rx_buffer[j] == NULL) {
				break;
			}
			qede_recycle_copied_rx_buffer(
			    lro_info->rx_buffer[j]);
			lro_info->rx_buffer[j] = NULL;
		}
		lro_info->agg_state = QEDE_AGG_STATE_NONE;
	}
}
1836 
1837 static void
qede_free_rx_buffers_legacy(qede_t * qede,qede_rx_buf_area_t * rx_buf_area)1838 qede_free_rx_buffers_legacy(qede_t *qede, qede_rx_buf_area_t *rx_buf_area)
1839 {
1840 	int i, j;
1841 	u32 ref_cnt, bufs_per_page;
1842 	qede_rx_buffer_t *rx_buffer, *first_rx_buf_in_page = NULL;
1843 	qede_rx_ring_t *rx_ring = rx_buf_area->rx_ring;
1844 	bool free_rx_buffer;
1845 
1846 	if (rx_buf_area != NULL) {
1847 		bufs_per_page = rx_buf_area->bufs_per_page;
1848 		rx_buffer = &rx_buf_area->rx_buf_pool[0];
1849 
1851 		for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
1852 			free_rx_buffer = true;
1853 			for (j = 0; j < bufs_per_page; j++) {
1854 				if (!j) {
1855 					first_rx_buf_in_page = rx_buffer;
1856 				}
1857 				if (rx_buffer->ref_cnt != 0) {
1858 					ref_cnt = atomic_dec_32_nv(
1859 					    &rx_buffer->ref_cnt);
1860 					if (ref_cnt == 0) {
1861 						/*
1862 						 * Buffer is now
1863 						 * completely free
1864 						 */
1865 						if (rx_buffer->mp) {
1866 							freemsg(rx_buffer->mp);
1867 							rx_buffer->mp = NULL;
1868 						}
1869 					} else {
1870 						/*
1871 						 * Since the buffer is still
1872 						 * held by the stack, we
1873 						 * can't free the whole page.
1874 						 */
1875 						free_rx_buffer = false;
1876 					}
1877 				}
1878 				rx_buffer++;
1879 			}
1880 
1881 			if (free_rx_buffer) {
1882 				qede_pci_free_consistent(
1883 				    &first_rx_buf_in_page->dma_info.dma_handle,
1884 				    &first_rx_buf_in_page->dma_info.acc_handle);
1885 			}
1886 		}
1887 
1888 		/*
1889 		 * If no more buffers are held by the stack
1890 		 * then free the buf pools.
1891 		 */
1892 		if (rx_buf_area->buf_upstream == 0) {
1893 			mutex_destroy(&rx_buf_area->active_buf_list.lock);
1894 			mutex_destroy(&rx_buf_area->passive_buf_list.lock);
1895 
1896 			kmem_free(rx_buf_area, sizeof (qede_rx_buf_area_t));
1897 			rx_buf_area = NULL;
1898 			if (atomic_cas_32(&qede->detach_unsafe, 2, 2)) {
1899 				atomic_dec_32(&qede->detach_unsafe);
1900 			}
1901 		}
1902 	}
1903 }
1904 
1905 
1906 static void
1907 qede_free_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
1908 {
1908 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1909 
1910 	qede_free_lro_rx_buffers(rx_ring);
1911 	qede_free_rx_buffers_legacy(qede, rx_buf_area);
1912 }
1913 
1914 static void
1915 qede_free_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
1916 {
1917 	qede_rx_ring_t *rx_ring;
1918 
1919 	ASSERT(qede != NULL);
1920 	ASSERT(fp != NULL);
1921 
1922 
1923 	rx_ring = fp->rx_ring;
1924 	rx_ring->rx_buf_area->inactive = 1;
1925 
1926 	qede_free_rx_buffers(qede, rx_ring);
1927 
1928 
1929 	if (rx_ring->rx_bd_ring.p_virt_addr) {
1930 		ecore_chain_free(&qede->edev, &rx_ring->rx_bd_ring);
1931 		rx_ring->rx_bd_ring.p_virt_addr = NULL;
1932 	}
1933 
1934 	if (rx_ring->rx_cqe_ring.p_virt_addr) {
1935 		ecore_chain_free(&qede->edev, &rx_ring->rx_cqe_ring);
1936 		rx_ring->rx_cqe_ring.p_virt_addr = NULL;
1937 		if (rx_ring->rx_cqe_ring.pbl_sp.p_virt_table) {
1938 			rx_ring->rx_cqe_ring.pbl_sp.p_virt_table = NULL;
1939 		}
1940 	}
1941 	rx_ring->hw_cons_ptr = NULL;
1942 	rx_ring->hw_rxq_prod_addr = NULL;
1943 	rx_ring->sw_rx_cons = 0;
1944 	rx_ring->sw_rx_prod = 0;
1945 
1946 }
1947 
1948 
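/*
 * Post every buffer in the pool to the rx BD ring and seed the
 * active list. Each BD carries the buffer's 64-bit DMA address,
 * split into little-endian lo/hi halves.
 */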
1949 static int
1950 qede_init_bd(qede_t *qede, qede_rx_ring_t *rx_ring)
1951 {
1952 	struct eth_rx_bd *bd = NULL;
1953 	int ret = DDI_SUCCESS;
1954 	int i;
1955 	qede_rx_buffer_t *rx_buffer;
1956 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1957 	qede_rx_buf_list_t *active_buf_list = &rx_buf_area->active_buf_list;
1958 
1959 	for (i = 0; i < rx_ring->rx_buf_count; i++) {
1960 		rx_buffer = &rx_buf_area->rx_buf_pool[i];
1961 		active_buf_list->buf_list[i] = rx_buffer;
1962 		active_buf_list->num_entries++;
1963 		bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
1964 		if (bd == NULL) {
1965 			qede_print_err("!%s(%d): invalid NULL bd in "
1966 			    "rx_bd_ring", __func__, qede->instance);
1967 			ret = DDI_FAILURE;
1968 			goto err;
1969 		}
1970 
1971 		bd->addr.lo = HOST_TO_LE_32(U64_LO(
1972 				rx_buffer->dma_info.phys_addr));
1973 		bd->addr.hi = HOST_TO_LE_32(U64_HI(
1974 				rx_buffer->dma_info.phys_addr));
1975 
1976 	}
1977 	active_buf_list->tail = 0;
1978 err:
1979 	return (ret);
1980 }
1981 
1982 
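/*
 * Pop the buffer at the head of the active list for delivery
 * upstream. ref_cnt and buf_upstream are bumped so that teardown
 * can tell the buffer is loaned out, and a fresh mblk is attached
 * with desballoc() if the previous one was consumed.
 */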
1983 qede_rx_buffer_t *
1984 qede_get_from_active_list(qede_rx_ring_t *rx_ring,
1985     uint32_t *num_entries)
1986 {
1987 	qede_rx_buffer_t *rx_buffer;
1988 	qede_rx_buf_list_t *active_buf_list =
1989 	    &rx_ring->rx_buf_area->active_buf_list;
1990 	u16 head = active_buf_list->head;
1991 
1992 	rx_buffer = active_buf_list->buf_list[head];
1993 	active_buf_list->buf_list[head] = NULL;
1994 	head = (head + 1) & RX_RING_MASK;
1995 
1996 	if (rx_buffer) {
1997 		atomic_dec_32(&active_buf_list->num_entries);
1998 		atomic_inc_32(&rx_ring->rx_buf_area->buf_upstream);
1999 		atomic_inc_32(&rx_buffer->ref_cnt);
2000 		rx_buffer->buf_state = RX_BUF_STATE_WITH_OS;
2001 
2002 		if (rx_buffer->mp == NULL) {
2003 			rx_buffer->mp =
2004 			    desballoc(rx_buffer->dma_info.virt_addr,
2005 			    rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2006 		}
2007 	}
2008 
2009 	*num_entries = active_buf_list->num_entries;
2010 	active_buf_list->head = head;
2011 
2012 	return (rx_buffer);
2013 }
2014 
2015 qede_rx_buffer_t *
2016 qede_get_from_passive_list(qede_rx_ring_t *rx_ring)
2017 {
2018 	qede_rx_buf_list_t *passive_buf_list =
2019 	    &rx_ring->rx_buf_area->passive_buf_list;
2020 	qede_rx_buffer_t *rx_buffer;
2021 	u32 head;
2022 
2023 	mutex_enter(&passive_buf_list->lock);
2024 	head = passive_buf_list->head;
2025 	if (passive_buf_list->buf_list[head] == NULL) {
2026 		mutex_exit(&passive_buf_list->lock);
2027 		return (NULL);
2028 	}
2029 
2030 	rx_buffer = passive_buf_list->buf_list[head];
2031 	passive_buf_list->buf_list[head] = NULL;
2032 
2033 	passive_buf_list->head = (passive_buf_list->head + 1) & RX_RING_MASK;
2034 	mutex_exit(&passive_buf_list->lock);
2035 
2036 	atomic_dec_32(&passive_buf_list->num_entries);
2037 
2038 	return (rx_buffer);
2039 }
2040 
2041 void
2042 qede_put_to_active_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2043 {
2044 	qede_rx_buf_list_t *active_buf_list =
2045 	    &rx_ring->rx_buf_area->active_buf_list;
2046 	u16 tail = active_buf_list->tail;
2047 
2048 	active_buf_list->buf_list[tail] = rx_buffer;
2049 	tail = (tail + 1) & RX_RING_MASK;
2050 
2051 	active_buf_list->tail = tail;
2052 	atomic_inc_32(&active_buf_list->num_entries);
2053 }
2054 
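/*
 * Drain the passive list back into the rx BD ring. Buffers that
 * the stack has returned wait on the passive list until this
 * routine re-posts them to the firmware and moves them back to
 * the active list.
 */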
2055 void
2056 qede_replenish_rx_buffers(qede_rx_ring_t *rx_ring)
2057 {
2058 	qede_rx_buffer_t *rx_buffer;
2059 	int count = 0;
2060 	struct eth_rx_bd *bd;
2061 
2062 	/*
2063 	 * Only replenish when we have at least
2064 	 * 1/4th of the ring to do. We don't want
2065 	 * to incur many lock contentions and
2066 	 * cycles for just a few buffers.
2067 	 * We don't bother with the passive area lock
2068 	 * here because we're just getting an
2069 	 * estimate. Also, we only pull from
2070 	 * the passive list in this function.
2071 	 */
2072 
2073 	/*
2074 	 * Use a replenish lock because we can do the
2075 	 * replenish operation at the end of
2076 	 * processing the rx_ring, but also when
2077 	 * we get buffers back from the upper
2078 	 * layers.
2079 	 */
2080 	if (mutex_tryenter(&rx_ring->rx_replen_lock) == 0) {
2081 		qede_info(rx_ring->qede, "!%s(%d): Failed to take"
2082 			" replenish_lock",
2083 			__func__, rx_ring->qede->instance);
2084 		return;
2085 	}
2086 
2087 	rx_buffer = qede_get_from_passive_list(rx_ring);
2088 
2089 	while (rx_buffer != NULL) {
2090 		bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
2091 		if (bd == NULL) {
2092 			qede_info(rx_ring->qede, "!%s(%d): bd = null",
2093 				__func__, rx_ring->qede->instance);
2094 			qede_put_to_passive_list(rx_ring, rx_buffer);
2095 			break;
2096 		}
2097 
2098 		bd->addr.lo = HOST_TO_LE_32(U64_LO(
2099 				rx_buffer->dma_info.phys_addr));
2100 		bd->addr.hi = HOST_TO_LE_32(
2101 				U64_HI(rx_buffer->dma_info.phys_addr));
2102 
2103 		/*
2104 		 * Put the buffer in active list since it will be
2105 		 * posted to fw now
2106 		 */
2107 		qede_put_to_active_list(rx_ring, rx_buffer);
2108 		rx_buffer->buf_state = RX_BUF_STATE_WITH_FW;
2109 		count++;
2110 		rx_buffer = qede_get_from_passive_list(rx_ring);
2111 	}
2112 	mutex_exit(&rx_ring->rx_replen_lock);
2113 }
2114 
2115 /*
2116  * Put the rx_buffer to the passive_buf_list
2117  */
2118 int
2119 qede_put_to_passive_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2120 {
2121 	qede_rx_buf_list_t *passive_buf_list =
2122 	    &rx_ring->rx_buf_area->passive_buf_list;
2123 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2124 	int tail = 0;
2125 
2126 	mutex_enter(&passive_buf_list->lock);
2127 
2128 	tail = passive_buf_list->tail;
2129 	passive_buf_list->tail = (passive_buf_list->tail + 1) & RX_RING_MASK;
2130 
2131 	rx_buf_area->passive_buf_list.buf_list[tail] = rx_buffer;
2132 	atomic_inc_32(&passive_buf_list->num_entries);
2133 
2134 	if (passive_buf_list->num_entries > rx_ring->rx_buf_count) {
2135 		/* Sanity check */
2136 		qede_info(rx_ring->qede, "ERROR: num_entries (%d)"
2137 		    " > max count (%d)",
2138 		    passive_buf_list->num_entries,
2139 		    rx_ring->rx_buf_count);
2140 	}
2141 	mutex_exit(&passive_buf_list->lock);
2142 	return (passive_buf_list->num_entries);
2143 }
2144 
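/*
 * desballoc() free_func callback, invoked when the upper layer
 * frees an mblk that points into one of our rx buffers. Depending
 * on the remaining ref_cnt, the buffer is either queued on the
 * passive list for reuse or, if it belongs to a previous plumb of
 * the interface, torn down for good.
 */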
2145 void
2146 qede_recycle_rx_buffer(char *arg)
2147 {
2148 	/* LINTED E_BAD_PTR_CAST_ALIGN */
2149 	qede_rx_buffer_t *rx_buffer = (qede_rx_buffer_t *)arg;
2150 	qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2151 	qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2152 	qede_t *qede = rx_ring->qede;
2153 	u32 buf_upstream = 0, ref_cnt;
2154 	u32 num_entries;
2155 
2156 	if (rx_buffer->ref_cnt == 0) {
2157 		return;
2158 	}
2159 
2160 	/*
2161 	 * Since the data buffer associated with the mblk is freed
2162 	 * by the upper layer, allocate it again so it carries the
2163 	 * proper free_func pointer.
2164 	 */
2165 	rx_buffer->mp = desballoc(rx_buffer->dma_info.virt_addr,
2166 	    rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2167 
2168 	ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2169 	if (ref_cnt == 1) {
2170 		/* Put the buffer into passive_buf_list to be reused */
2171 		num_entries = qede_put_to_passive_list(rx_ring, rx_buffer);
2172 		if (num_entries >= 32) {
2173 			if (mutex_tryenter(&rx_ring->rx_lock) != 0) {
2174 				qede_replenish_rx_buffers(rx_ring);
2175 				qede_update_rx_q_producer(rx_ring);
2176 				mutex_exit(&rx_ring->rx_lock);
2177 			}
2178 		}
2179 	} else if (ref_cnt == 0) {
2180 		/*
2181 		 * This is a buffer from a previous load instance of
2182 		 * rx_buf_area. Free the rx_buffer and if no more
2183 		 * buffers are upstream from this rx_buf_area instance
2184 		 * then free the rx_buf_area.
2185 		 */
2186 		if (rx_buffer->mp != NULL) {
2187 			freemsg(rx_buffer->mp);
2188 			rx_buffer->mp = NULL;
2189 		}
2190 		mutex_enter(&qede->drv_lock);
2191 
2192 		buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2193 		if (buf_upstream >= 1) {
2194 			atomic_dec_32(&rx_buf_area->buf_upstream);
2195 		}
2196 		if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2197 			qede_free_rx_buffers_legacy(qede, rx_buf_area);
2198 		}
2199 
2200 		mutex_exit(&qede->drv_lock);
2201 	} else {
2202 		/* Sanity check */
2203 		qede_info(rx_ring->qede, "rx_buffer %p"
2204 		    " ref_cnt %d is invalid",
2205 		    rx_buffer, ref_cnt);
2206 	}
2207 }
2208 
2209 void
2210 qede_recycle_copied_rx_buffer(qede_rx_buffer_t *rx_buffer)
2211 {
2212 	qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2213 	qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2214 	qede_t *qede = rx_ring->qede;
2215 	u32 buf_upstream = 0, ref_cnt;
2216 
2217 	if (rx_buffer->ref_cnt == 0) {
2218 		/*
2219 		 * Can happen if the buffer is being free'd
2220 		 * in the stop routine
2221 		 */
2222 		qede_info(qede, "!%s(%d): rx_buffer->ref_cnt = 0",
2223 		    __func__, qede->instance);
2224 		return;
2225 	}
2226 
2227 	buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2228 	if (buf_upstream >= 1) {
2229 		atomic_dec_32(&rx_buf_area->buf_upstream);
2230 	}
2231 
2232 	/*
2233 	 * Since the data buffer associated with the mblk is free'ed
2234 	 * by upper layer, allocate it again to contain proper
2235 	 * free_func pointer
2236 	 * Though we could also be recycling a buffer that got copied,
2237 	 * so in that case the mp would still be intact.
2238 	 */
2239 
2240 	ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2241 	if (ref_cnt == 1) {
2242 		/* Put the buffer into passive_buf_list to be reused */
2243 		qede_put_to_passive_list(rx_ring, rx_buffer);
2244 	} else if (ref_cnt == 0) {
2245 		/*
2246 		 * This is a buffer from a previous load instance of
2247 		 * rx_buf_area. Free the rx_buffer and if no more
2248 		 * buffers are upstream from this rx_buf_area instance
2249 		 * then free the rx_buf_area.
2250 		 */
2251 		qede_info(rx_ring->qede, "Free up rx_buffer %p, index %d"
2252 		    " ref_cnt %d from a previous driver iteration",
2253 		    rx_buffer, rx_buffer->index, ref_cnt);
2254 		if (rx_buffer->mp != NULL) {
2255 			freemsg(rx_buffer->mp);
2256 			rx_buffer->mp = NULL;
2257 		}
2258 
2259 		if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2260 			mutex_enter(&qede->drv_lock);
2261 			qede_free_rx_buffers_legacy(qede, rx_buf_area);
2262 			mutex_exit(&qede->drv_lock);
2263 		}
2264 	} else {
2265 		/* Sanity check */
2266 		qede_info(rx_ring->qede, "rx_buffer %p"
2267 		    " ref_cnt %d is invalid",
2268 		    rx_buffer, ref_cnt);
2269 	}
2270 }
2271 
2272 
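/*
 * Allocate the rx data buffers. When the configured buffer size
 * fits within a page, several buffers share one page-sized DMA
 * allocation and its handles; otherwise each buffer gets its own
 * allocation. Every buffer is primed with an mblk and a recycle
 * callback before being posted to the BD ring.
 */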
2273 static int
2274 qede_alloc_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
2275 {
2276 	int ret = DDI_SUCCESS, i, j;
2277 	qede_rx_buffer_t *rx_buffer;
2278 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2279 	u32 bufs_per_page, buf_size;
2280 	int page_size = (int)ddi_ptob(qede->dip, 1);
2281 	qede_dma_info_t *dma_info;
2282 	ddi_dma_cookie_t temp_cookie;
2283 	int allocated = 0;
2284 	u64 dma_addr;
2285 	u8 *vaddr;
2286 	ddi_dma_handle_t dma_handle;
2287 	ddi_acc_handle_t acc_handle;
2288 
2289 	if (rx_ring->rx_buf_size > page_size) {
2290 		bufs_per_page = 1;
2291 		buf_size = rx_ring->rx_buf_size;
2292 	} else {
2293 		bufs_per_page = page_size / DEFAULT_RX_BUF_SIZE;
2295 		buf_size = page_size;
2296 	}
2297 
2298 	rx_buffer = &rx_buf_area->rx_buf_pool[0];
2299 	rx_buf_area->bufs_per_page = bufs_per_page;
2300 
2301 	mutex_init(&rx_buf_area->active_buf_list.lock, NULL,
2302 	    MUTEX_DRIVER, 0);
2303 	mutex_init(&rx_buf_area->passive_buf_list.lock, NULL,
2304 	    MUTEX_DRIVER, 0);
2305 
2306 	for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
2307 		dma_info = &rx_buffer->dma_info;
2308 
2309 		ret = qede_dma_mem_alloc(qede,
2310 			buf_size,
2311 			DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2312 			(caddr_t *)&dma_info->virt_addr,
2313 			&temp_cookie,
2314 			&dma_info->dma_handle,
2315 			&dma_info->acc_handle,
2316 			&qede_dma_attr_rxbuf,
2317 			&qede_buf_acc_attr);
2318 		if (ret != DDI_SUCCESS) {
2319 			goto err;
2320 		}
2321 
2322 		allocated++;
2323 		vaddr = dma_info->virt_addr;
2324 		dma_addr = temp_cookie.dmac_laddress;
2325 		dma_handle = dma_info->dma_handle;
2326 		acc_handle = dma_info->acc_handle;
2327 
2328 		for (j = 0; j < bufs_per_page; j++) {
2329 			dma_info = &rx_buffer->dma_info;
2330 			dma_info->virt_addr = vaddr;
2331 			dma_info->phys_addr = dma_addr;
2332 			dma_info->dma_handle = dma_handle;
2333 			dma_info->acc_handle = acc_handle;
2334 			dma_info->offset = j * rx_ring->rx_buf_size;
2335 			/* Populate the recycle func and arg for the buffer */
2336 			rx_buffer->recycle.free_func = qede_recycle_rx_buffer;
2337 			rx_buffer->recycle.free_arg = (caddr_t)rx_buffer;
2338 
2339 			rx_buffer->mp = desballoc(dma_info->virt_addr,
2340 			    rx_ring->rx_buf_size, 0,
2341 			    &rx_buffer->recycle);
2342 			if (rx_buffer->mp == NULL) {
2343 				qede_warn(qede, "desballoc() failed, index %d",
2344 				    i);
2345 			}
2346 			rx_buffer->rx_ring = rx_ring;
2347 			rx_buffer->rx_buf_area = rx_buf_area;
2348 			rx_buffer->index = i + j;
2349 			rx_buffer->ref_cnt = 1;
2350 			rx_buffer++;
2351 
2352 			vaddr += rx_ring->rx_buf_size;
2353 			dma_addr += rx_ring->rx_buf_size;
2354 		}
2355 		rx_ring->sw_rx_prod++;
2356 	}
2357 
2358 	/*
2359 	 * Fill the rx_bd_ring with the allocated
2360 	 * buffers
2361 	 */
2362 	ret = qede_init_bd(qede, rx_ring);
2363 	if (ret != DDI_SUCCESS) {
2364 		goto err;
2365 	}
2366 
2367 	rx_buf_area->buf_upstream = 0;
2368 
2369 	return (ret);
2370 err:
2371 	qede_free_rx_buffers(qede, rx_ring);
2372 	return (ret);
2373 }
2374 
2375 static int
2376 qede_alloc_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2377 {
2378 	qede_rx_ring_t *rx_ring;
2379 	qede_rx_buf_area_t *rx_buf_area;
2380 	size_t size;
2381 
2382 	ASSERT(qede != NULL);
2383 	ASSERT(fp != NULL);
2384 
2385 	rx_ring = fp->rx_ring;
2386 
2387 	atomic_inc_32(&qede->detach_unsafe);
2388 	/*
2389 	 * Allocate rx_buf_area for the plumb instance
2390 	 */
2391 	rx_buf_area = kmem_zalloc(sizeof (*rx_buf_area), KM_SLEEP);
2392 	if (rx_buf_area == NULL) {
2393 		qede_info(qede, "!%s(%d): Cannot alloc rx_buf_area",
2394 			__func__, qede->instance);
2395 		return (DDI_FAILURE);
2396 	}
2397 
2398 	rx_buf_area->inactive = 0;
2399 	rx_buf_area->rx_ring = rx_ring;
2400 	rx_ring->rx_buf_area = rx_buf_area;
2401 	/* Rx Buffer descriptor queue */
2402 	if (ecore_chain_alloc(&qede->edev,
2403 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2404 			ECORE_CHAIN_MODE_NEXT_PTR,
2405 			ECORE_CHAIN_CNT_TYPE_U16,
2406 			qede->rx_ring_size,
2407 			sizeof (struct eth_rx_bd),
2408 			&rx_ring->rx_bd_ring,
2409 			NULL) != ECORE_SUCCESS) {
2410 		cmn_err(CE_WARN, "Failed to allocate "
2411 		    "ecore cqe chain");
2412 		return (DDI_FAILURE);
2413 	}
2414 
2415 	/* Rx Completion Descriptor queue */
2416 	if (ecore_chain_alloc(&qede->edev,
2417 			ECORE_CHAIN_USE_TO_CONSUME,
2418 			ECORE_CHAIN_MODE_PBL,
2419 			ECORE_CHAIN_CNT_TYPE_U16,
2420 			qede->rx_ring_size,
2421 			sizeof (union eth_rx_cqe),
2422 			&rx_ring->rx_cqe_ring,
2423 			NULL) != ECORE_SUCCESS) {
2424 		cmn_err(CE_WARN, "Failed to allocate "
2425 		    "ecore bd chain");
2426 		return (DDI_FAILURE);
2427 	}
2428 
2429 	/* Rx Data buffers */
2430 	if (qede_alloc_rx_buffers(qede, rx_ring) != DDI_SUCCESS) {
2431 		qede_print_err("!%s(%d): Failed to alloc rx buffers",
2432 		    __func__, qede->instance);
2433 		return (DDI_FAILURE);
2434 	}
2435 	return (DDI_SUCCESS);
2436 }
2437 
2438 static void
2439 qede_free_tx_bd_ring(qede_t *qede, qede_fastpath_t *fp)
2440 {
2441 	int i;
2442 	qede_tx_ring_t *tx_ring;
2443 
2444 	ASSERT(qede != NULL);
2445 	ASSERT(fp != NULL);
2446 
2447 	for (i = 0; i < qede->num_tc; i++) {
2448 		tx_ring = fp->tx_ring[i];
2449 
2450 		if (tx_ring->tx_bd_ring.p_virt_addr) {
2451 			ecore_chain_free(&qede->edev, &tx_ring->tx_bd_ring);
2452 			tx_ring->tx_bd_ring.p_virt_addr = NULL;
2453 		}
2454 		tx_ring->hw_cons_ptr = NULL;
2455 		tx_ring->sw_tx_cons = 0;
2456 		tx_ring->sw_tx_prod = 0;
2457 
2458 	}
2459 }
2460 
2461 static u32
2462 qede_alloc_tx_bd_ring(qede_t *qede, qede_tx_ring_t *tx_ring)
2463 {
2464 	u32 ret = 0;
2465 
2466 	ret = ecore_chain_alloc(&qede->edev,
2467 	    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2468 	    ECORE_CHAIN_MODE_PBL,
2469 	    ECORE_CHAIN_CNT_TYPE_U16,
2470 	    tx_ring->bd_ring_size,
2471 	    sizeof (union eth_tx_bd_types),
2472 	    &tx_ring->tx_bd_ring,
2473 	    NULL);
2474 	if (ret) {
2475 		cmn_err(CE_WARN, "!%s(%d): Failed to alloc tx bd chain",
2476 		    __func__, qede->instance);
2477 		goto error;
2478 	}
2479 
2480 
2481 error:
2482 	return (ret);
2483 }
2484 
2485 static void
2486 qede_free_tx_bcopy_buffers(qede_tx_ring_t *tx_ring)
2487 {
2488 	qede_tx_bcopy_pkt_t *bcopy_pkt;
2489 	int i;
2490 
2491 	for (i = 0; i < tx_ring->tx_ring_size; i++) {
2492 		bcopy_pkt = &tx_ring->bcopy_list.bcopy_pool[i];
2493 		if (bcopy_pkt->dma_handle != NULL) {
2494 			(void) ddi_dma_unbind_handle(bcopy_pkt->dma_handle);
2495 		}
2496 		if (bcopy_pkt->acc_handle != NULL) {
2497 			ddi_dma_mem_free(&bcopy_pkt->acc_handle);
2498 			bcopy_pkt->acc_handle = NULL;
2499 		}
2500 		if (bcopy_pkt->dma_handle != NULL) {
2501 			ddi_dma_free_handle(&bcopy_pkt->dma_handle);
2502 			bcopy_pkt->dma_handle = NULL;
2503 		}
2504 		if (bcopy_pkt->mp != NULL) {
2505 			freemsg(bcopy_pkt->mp);
2506 			bcopy_pkt->mp = NULL;
2507 		}
2508 	}
2509 
2510 	if (tx_ring->bcopy_list.bcopy_pool != NULL) {
2511 		kmem_free(tx_ring->bcopy_list.bcopy_pool,
2512 		    tx_ring->bcopy_list.size);
2513 		tx_ring->bcopy_list.bcopy_pool = NULL;
2514 	}
2515 
2516 	mutex_destroy(&tx_ring->bcopy_list.lock);
2517 }
2518 
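/*
 * Allocate the copy-mode ("bcopy") tx buffers: preallocated,
 * premapped buffers into which packet data can be copied at
 * transmit time instead of binding the mblk for DMA.
 */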
2519 static u32
2520 qede_alloc_tx_bcopy_buffers(qede_t *qede, qede_tx_ring_t *tx_ring)
2521 {
2522 	u32 ret = DDI_SUCCESS;
2524 	size_t size;
2525 	qede_tx_bcopy_pkt_t *bcopy_pkt, *bcopy_list;
2526 	int i;
2527 	qede_dma_info_t dma_info;
2528 	ddi_dma_cookie_t temp_cookie;
2529 
2530 	/*
2531 	 * Allocate one bcopy packet descriptor per tx descriptor;
2532 	 * the copy buffers themselves are tx_buf_size bytes each
2533 	 * and are allocated in the loop below.
2534 	 */
2535 	size = sizeof (qede_tx_bcopy_pkt_t) * qede->tx_ring_size;
2543 	bcopy_list = kmem_zalloc(size, KM_SLEEP);
2544 	if (bcopy_list == NULL) {
2545 		qede_warn(qede, "!%s(%d): Failed to allocate bcopy_list",
2546 		    __func__, qede->instance);
2547 		ret = DDI_FAILURE;
2548 		goto exit;
2549 	}
2550 
2551 	tx_ring->bcopy_list.size = size;
2552 	tx_ring->bcopy_list.bcopy_pool = bcopy_list;
2553 	bcopy_pkt = bcopy_list;
2554 
2555 	tx_ring->bcopy_list.head = 0;
2556 	tx_ring->bcopy_list.tail = 0;
2557 	mutex_init(&tx_ring->bcopy_list.lock, NULL, MUTEX_DRIVER, 0);
2558 
2559 	for (i = 0; i < qede->tx_ring_size; i++) {
2560 
2561 		ret = qede_dma_mem_alloc(qede,
2562 					qede->tx_buf_size,
2563 					DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2564 					(caddr_t *)&dma_info.virt_addr,
2565 					&temp_cookie,
2566 					&dma_info.dma_handle,
2567 					&dma_info.acc_handle,
2568 					&qede_dma_attr_txbuf,
2569 					&qede_buf_acc_attr);
2570 		if (ret) {
2571 			ret = DDI_FAILURE;
2572 			goto exit;
2573 		}
2574 
2575 
2576 		bcopy_pkt->virt_addr = dma_info.virt_addr;
2577 		bcopy_pkt->phys_addr = temp_cookie.dmac_laddress;
2578 		bcopy_pkt->dma_handle = dma_info.dma_handle;
2579 		bcopy_pkt->acc_handle = dma_info.acc_handle;
2580 
2581 		tx_ring->bcopy_list.free_list[i] = bcopy_pkt;
2582 		bcopy_pkt++;
2583 	}
2584 
2585 exit:
2586 	return (ret);
2587 }
2588 
2589 static void
2590 qede_free_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2591 {
2592 	qede_dma_handle_entry_t *dmah_entry;
2593 	int i;
2594 
2595 	for (i = 0; i < tx_ring->tx_ring_size; i++) {
2596 		dmah_entry = &tx_ring->dmah_list.dmah_pool[i];
2597 		if (dmah_entry) {
2598 			if (dmah_entry->dma_handle != NULL) {
2599 				ddi_dma_free_handle(&dmah_entry->dma_handle);
2600 				dmah_entry->dma_handle = NULL;
2601 			} else {
2602 				qede_info(qede, "dmah_entry %p, handle is NULL",
2603 				     dmah_entry);
2604 			}
2605 		}
2606 	}
2607 
2608 	if (tx_ring->dmah_list.dmah_pool != NULL) {
2609 		kmem_free(tx_ring->dmah_list.dmah_pool,
2610 		    tx_ring->dmah_list.size);
2611 		tx_ring->dmah_list.dmah_pool = NULL;
2612 	}
2613 
2614 	mutex_destroy(&tx_ring->dmah_list.lock);
2615 }
2616 
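/*
 * Preallocate one DMA handle per tx descriptor for mapped mode,
 * where packet data is bound for DMA directly rather than copied.
 * The handles sit on a free list and are claimed per packet at
 * transmit time.
 */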
2617 static u32
2618 qede_alloc_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2619 {
2620 	int i;
2621 	size_t size;
2622 	u32 ret = DDI_SUCCESS;
2623 	qede_dma_handle_entry_t *dmah_entry, *dmah_list;
2624 
2625 	size = sizeof (qede_dma_handle_entry_t) * qede->tx_ring_size;
2626 	dmah_list = kmem_zalloc(size, KM_SLEEP);
2627 	if (dmah_list == NULL) {
2628 		qede_warn(qede, "!%s(%d): Failed to allocate dmah_list",
2629 		    __func__, qede->instance);
2630                 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2631 		ret = DDI_FAILURE;
2632 		goto exit;
2633 	}
2634 
2635 	tx_ring->dmah_list.size = size;
2636 	tx_ring->dmah_list.dmah_pool = dmah_list;
2637 	dmah_entry = dmah_list;
2638 
2639 	tx_ring->dmah_list.head = 0;
2640 	tx_ring->dmah_list.tail = 0;
2641 	mutex_init(&tx_ring->dmah_list.lock, NULL, MUTEX_DRIVER, 0);
2642 
2646 	for (i = 0; i < qede->tx_ring_size; i++) {
2647 		ret = ddi_dma_alloc_handle(qede->dip,
2648 		    &qede_tx_buf_dma_attr,
2649 		    DDI_DMA_DONTWAIT,
2650 		    NULL,
2651 		    &dmah_entry->dma_handle);
2652 		if (ret != DDI_SUCCESS) {
2653 			qede_print_err("!%s(%d): dma alloc handle failed "
2654 			    "for index %d",
2655 			    __func__, qede->instance, i);
2656 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2657 			ret = DDI_FAILURE;
2658 			goto exit;
2659 		}
2660 
2661 		tx_ring->dmah_list.free_list[i] = dmah_entry;
2662 		dmah_entry++;
2663 	}
2664 exit:
2665 	return (ret);
2666 }
2667 
2668 static u32
2669 qede_alloc_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2670 {
2671 	int i;
2672 	qede_tx_ring_t *tx_ring;
2673 	u32 ret = DDI_SUCCESS;
2674 	size_t size;
2675 	qede_tx_recycle_list_t *recycle_list;
2676 
2677 	ASSERT(qede != NULL);
2678 	ASSERT(fp != NULL);
2679 
2680 	for (i = 0; i < qede->num_tc; i++) {
2681 		tx_ring = fp->tx_ring[i];
2682 		tx_ring->bd_ring_size = qede->tx_ring_size;
2683 
2684 		/*
2685 		 * Allocate the buffer descriptor chain
2686 		 */
2687 		ret = qede_alloc_tx_bd_ring(qede, tx_ring);
2688 		if (ret) {
2689 			cmn_err(CE_WARN, "!%s(%d): failed, %s",
2690 			    __func__, qede->instance, qede_get_ddi_fail(ret));
2691 			return (ret);
2692 		}
2693 
2694 		/*
2695 		 * Allocate copy mode buffers
2696 		 */
2697 		ret = qede_alloc_tx_bcopy_buffers(qede, tx_ring);
2698 		if (ret) {
2699 			qede_print_err("!%s(%d): Failed to alloc tx copy "
2700 			    "buffers", __func__, qede->instance);
2701 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2702 			ret = DDI_FAILURE;
2703 			goto exit;
2704 		}
2705 
2706 		/*
2707 		 * Allocate dma handles for mapped mode
2708 		 */
2709 		ret = qede_alloc_tx_dma_handles(qede, tx_ring);
2710 		if (ret) {
2711 			qede_print_err("!%s(%d): Failed to alloc tx dma "
2712 			    "handles", __func__, qede->instance);
2713 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2714 			ret = DDI_FAILURE;
2715 			goto exit;
2716 		}
2717 
2718 		/* Allocate tx_recycle list */
2719 		size = sizeof (qede_tx_recycle_list_t) * qede->tx_ring_size;
2720 		recycle_list = kmem_zalloc(size, KM_SLEEP);
2721 		if (recycle_list == NULL) {
2722 			qede_warn(qede, "!%s(%d): Failed to allocate"
2723 			    " tx_recycle_list", __func__, qede->instance);
2724 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2725 			ret = DDI_FAILURE;
2726 			goto exit;
2727 		}
2728 
2729 		tx_ring->tx_recycle_list = recycle_list;
2730 	}
2731 exit:
2732 	return (ret);
2733 }
2734 
2735 static void
2736 /* LINTED E_FUNC_ARG_UNUSED */
2737 qede_free_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2738 {
2739 	qede_pci_free_consistent(&fp->sb_dma_handle, &fp->sb_acc_handle);
2740 	fp->sb_virt = NULL;
2741 	fp->sb_phys = 0;
2742 }
2743 
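/*
 * Allocate DMA memory for the fastpath status block and register
 * it with ecore. The hardware posts consumer indices into the
 * pi_array of this block, which the rx and tx rings read through
 * their hw_cons_ptr pointers.
 */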
2744 static int
2745 qede_alloc_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2746 {
2747 	int status;
2748 	int sb_id;
2749 	struct ecore_dev *edev = &qede->edev;
2750 	struct ecore_hwfn *p_hwfn;
2751 	qede_vector_info_t *vect_info = fp->vect_info;
2752 	ddi_dma_cookie_t sb_cookie;
2753 
2754 	ASSERT(qede != NULL);
2755 	ASSERT(fp != NULL);
2756 
2757 	/*
2758 	 * In the case of multiple hardware engines,
2759 	 * interrupts are spread across all of them.
2760 	 * In the case of only one engine, all
2761 	 * interrupts are handled by that engine.
2762 	 * In the case of 2 engines, each has half
2763 	 * of the interrupts.
2764 	 */
2765 	sb_id = vect_info->vect_index;
2766 	p_hwfn = &edev->hwfns[sb_id % qede->num_hwfns];
2767 
2768 	/* Allocate dma mem. for status_block */
2769 	status = qede_dma_mem_alloc(qede,
2770 	    sizeof (struct status_block),
2771 	    (DDI_DMA_RDWR | DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2772 	    (caddr_t *)&fp->sb_virt,
2773 	    &sb_cookie,
2774 	    &fp->sb_dma_handle,
2775 	    &fp->sb_acc_handle,
2776 	    &qede_desc_dma_attr,
2777 	    &qede_desc_acc_attr);
2778 
2779 	if (status != DDI_SUCCESS) {
2780 		qede_info(qede, "Failed to allocate status_block dma mem");
2781 		return (status);
2782 	}
2783 
2784 	fp->sb_phys = sb_cookie.dmac_laddress;
2785 
2786 
2787 	status = ecore_int_sb_init(p_hwfn,
2788 			p_hwfn->p_main_ptt,
2789 			fp->sb_info,
2790 			(void *)fp->sb_virt,
2791 			fp->sb_phys,
2792 			fp->fp_index);
2793 	if (status != ECORE_SUCCESS) {
2794 		cmn_err(CE_WARN, "Failed ecore_int_sb_init");
2795 		return (DDI_FAILURE);
2796 	}
2797 
2798 	return (status);
2799 }
2800 
2801 static void
2802 qede_free_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2803 {
2804 	qede_tx_ring_t *tx_ring;
2805 	int i;
2806 
2807 	for (i = 0; i < qede->num_tc; i++) {
2808 		tx_ring = fp->tx_ring[i];
2809 		qede_free_tx_dma_handles(qede, tx_ring);
2810 		qede_free_tx_bcopy_buffers(tx_ring);
2811 		qede_free_tx_bd_ring(qede, fp);
2812 
2813 		if (tx_ring->tx_recycle_list) {
2814 			kmem_free(tx_ring->tx_recycle_list,
2815 			    sizeof (qede_tx_recycle_list_t)
2816 			    * qede->tx_ring_size);
2817 		}
2818 	}
2819 }
2820 
2821 static void
2822 qede_fastpath_free_phys_mem(qede_t *qede)
2823 {
2824 	int  i;
2825 	qede_fastpath_t *fp;
2826 
2827 	for (i = 0; i < qede->num_fp; i++) {
2828 		fp = &qede->fp_array[i];
2829 
2830 		qede_free_rx_ring_phys(qede, fp);
2831 		qede_free_tx_ring_phys(qede, fp);
2832 		qede_free_sb_phys(qede, fp);
2833 	}
2834 }
2835 
2836 /*
2837  * Save dma_handles associated with the fastpath elements
2838  * allocate by ecore for doing dma_sync in the fast_path
2839  */
2840 static int
2841 qede_save_fp_dma_handles(qede_t *qede, qede_fastpath_t *fp)
2842 {
2843 	int ret, i;
2844 	qede_rx_ring_t *rx_ring;
2845 	qede_tx_ring_t *tx_ring;
2846 
2847 	rx_ring = fp->rx_ring;
2848 
2849 	/* Rx bd ring dma_handle */
2850 	ret = qede_osal_find_dma_handle_for_block(qede,
2851 	    (void *)rx_ring->rx_bd_ring.p_phys_addr,
2852 	    &rx_ring->rx_bd_dmah);
2853 	if (ret != DDI_SUCCESS) {
2854 		qede_print_err("!%s(%d): Cannot find dma_handle for "
2855 		    "rx_bd_ring, addr %p", __func__, qede->instance,
2856 		    rx_ring->rx_bd_ring.p_phys_addr);
2857 		goto exit;
2858 	}
2859 
2860 	/* rx cqe ring dma_handle */
2861 	ret = qede_osal_find_dma_handle_for_block(qede,
2862 	    (void *)rx_ring->rx_cqe_ring.p_phys_addr,
2863 	    &rx_ring->rx_cqe_dmah);
2864 	if (ret != DDI_SUCCESS) {
2865 		qede_print_err("!%s(%d): Cannot find dma_handle for "
2866 		    "rx_cqe_ring, addr %p", __func__, qede->instance,
2867 		    rx_ring->rx_cqe_ring.p_phys_addr);
2868 		goto exit;
2869 	}
2870 	/* rx cqe ring pbl */
2871 	ret = qede_osal_find_dma_handle_for_block(qede,
2872 	    (void *)rx_ring->rx_cqe_ring.pbl_sp.p_phys_table,
2873 	    &rx_ring->rx_cqe_pbl_dmah);
2874 	if (ret) {
2875 		qede_print_err("!%s(%d): Cannot find dma_handle for "
2876 		    "rx_cqe pbl, addr %p", __func__, qede->instance,
2877 		    rx_ring->rx_cqe_ring.pbl_sp.p_phys_table);
2878 		goto exit;
2879 	}
2880 
2881 	/* tx_bd ring dma_handle(s) */
2882 	for (i = 0; i < qede->num_tc; i++) {
2883 		tx_ring = fp->tx_ring[i];
2884 
2885 		ret = qede_osal_find_dma_handle_for_block(qede,
2886 		    (void *)tx_ring->tx_bd_ring.p_phys_addr,
2887 		    &tx_ring->tx_bd_dmah);
2888 		if (ret != DDI_SUCCESS) {
2889 			qede_print_err("!%s(%d): Cannot find dma_handle "
2890 			    "for tx_bd_ring, addr %p", __func__,
2891 			    qede->instance,
2892 			    tx_ring->tx_bd_ring.p_phys_addr);
2893 			goto exit;
2894 		}
2895 
2896 		ret = qede_osal_find_dma_handle_for_block(qede,
2897 		    (void *)tx_ring->tx_bd_ring.pbl_sp.p_phys_table,
2898 		    &tx_ring->tx_pbl_dmah);
2899 		if (ret) {
2900 			qede_print_err("!%s(%d): Cannot find dma_handle for "
2901 			    "tx_bd pbl, addr %p", __func__, qede->instance,
2902 			    tx_ring->tx_bd_ring.pbl_sp.p_phys_table);
2903 			goto exit;
2904 		}
2905 	}
2906 
2907 exit:
2908 	return (ret);
2909 }
2910 
2911 int
2912 qede_fastpath_alloc_phys_mem(qede_t *qede)
2913 {
2914 	int status = 0, i;
2915 	qede_fastpath_t *fp;
2916 
2917 	for (i = 0; i < qede->num_fp; i++) {
2918 		fp = &qede->fp_array[i];
2919 
2920 		status = qede_alloc_sb_phys(qede, fp);
2921 		if (status != DDI_SUCCESS) {
2922 			goto err;
2923 		}
2924 
2925 		status = qede_alloc_rx_ring_phys(qede, fp);
2926 		if (status != DDI_SUCCESS) {
2927 			goto err;
2928 		}
2929 
2930 		status = qede_alloc_tx_ring_phys(qede, fp);
2931 		if (status != DDI_SUCCESS) {
2932 			goto err;
2933 		}
2934 		status = qede_save_fp_dma_handles(qede, fp);
2935 		if (status != DDI_SUCCESS) {
2936 			goto err;
2937 		}
2938 	}
2939 	return (status);
2940 err:
2941 	qede_fastpath_free_phys_mem(qede);
2942 	return (status);
2943 }
2944 
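/*
 * Wire up the software fastpath structures: bind each fastpath to
 * its status block, interrupt vector, rx ring and per-TC tx rings,
 * and seed the per-ring tunables (buffer counts, sizes and
 * thresholds) from the instance-wide defaults.
 */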
2945 static int
2946 qede_fastpath_config(qede_t *qede)
2947 {
2948 	int i, j;
2949 	qede_fastpath_t *fp;
2950 	qede_rx_ring_t *rx_ring;
2951 	qede_tx_ring_t *tx_ring;
2952 	qede_vector_info_t *vect_info;
2953 	int num_fp, num_hwfns;
2954 
2955 	ASSERT(qede != NULL);
2956 
2957 	num_fp = qede->num_fp;
2958 	num_hwfns = qede->num_hwfns;
2959 
2960 	vect_info = &qede->intr_ctx.intr_vect_info[num_hwfns];
2961 	fp = &qede->fp_array[0];
2962 	tx_ring = &qede->tx_array[0][0];
2963 
2964 	for (i = 0; i < num_fp; i++, fp++, vect_info++) {
2965 		fp->sb_info = &qede->sb_array[i];
2966 		fp->qede = qede;
2967 		fp->fp_index = i;
2968 		/*
2969 		 * With a single hwfn, the hwfn index is zero for all
2970 		 * fp entries. If there are two engines, this index
2971 		 * alternates between 0 and 1.
2972 		 */
2973 		fp->fp_hw_eng_index = fp->fp_index % num_hwfns;
2974 		fp->vport_id = 0;
2975 		fp->stats_id = 0;
2976 		fp->rss_id = fp->fp_index;
2977 		fp->rx_queue_index = fp->fp_index;
2978 		fp->vect_info = vect_info;
2979 		/*
2980 		 * After vport update, interrupts will be
2981 		 * running, so we need to initialize our
2982 		 * enable/disable gate as such.
2983 		 */
2984 		fp->disabled_by_poll = 0;
2985 
2986 		/* rx_ring setup */
2987 		rx_ring = &qede->rx_array[i];
2988 		fp->rx_ring = rx_ring;
2989 		rx_ring->fp = fp;
2990 		rx_ring->rx_buf_count = qede->rx_buf_count;
2991 		rx_ring->rx_buf_size = qede->rx_buf_size;
2992 		rx_ring->qede = qede;
2993 		rx_ring->sw_rx_cons = 0;
2994 		rx_ring->rx_copy_threshold = qede->rx_copy_threshold;
2995 		rx_ring->rx_low_buffer_threshold =
2996 		    qede->rx_low_buffer_threshold;
2997 		rx_ring->queue_started = B_FALSE;
2998 
2999 		/* tx_ring setup */
3000 		for (j = 0; j < qede->num_tc; j++) {
3001 			tx_ring = &qede->tx_array[j][i];
3002 			fp->tx_ring[j] = tx_ring;
3003 			tx_ring->qede = qede;
3004 			tx_ring->fp = fp;
3005 			tx_ring->fp_idx = i;
3006 			tx_ring->tx_queue_index = i * qede->num_fp +
3007 			    fp->fp_index;
3008 			tx_ring->tx_buf_size = qede->tx_buf_size;
3009 			tx_ring->tx_ring_size = qede->tx_ring_size;
3010 			tx_ring->queue_started = B_FALSE;
3011 #ifdef	DBLK_DMA_PREMAP
3012 			tx_ring->pm_handle = qede->pm_handle;
3013 #endif
3014 
3015 			tx_ring->doorbell_addr =
3016 			    qede->doorbell;
3017 			tx_ring->doorbell_handle =
3018 			    qede->doorbell_handle;
3019 		}
3020 	}
3021 
3022 	return (DDI_SUCCESS);
3023 }
3024 
3025 /*
3026  * op = 1, Initialize link
3027  * op = 0, Destroy link
3028  */
3029 int
3030 qede_configure_link(qede_t *qede, bool op)
3031 {
3032 	struct ecore_dev *edev = &qede->edev;
3033 	struct ecore_hwfn *hwfn;
3034 	struct ecore_ptt *ptt = NULL;
3035 	int i, ret = DDI_SUCCESS;
3036 
3037 	for_each_hwfn(edev, i) {
3038 		hwfn = &edev->hwfns[i];
3039 		qede_info(qede, "Configuring link for hwfn#%d", i);
3040 
3041 		ptt = ecore_ptt_acquire(hwfn);
3042 		if (ptt == NULL) {
3043 			qede_info(qede, "Cannot reserve ptt from ecore");
3044 			ret = DDI_FAILURE;
3045 			goto exit;
3046 		}
3047 
3048 		ret = ecore_mcp_set_link(hwfn, ptt, op);
3049 
3050 		ecore_ptt_release(hwfn, ptt);
3051 		if (ret) {
3052 			/* if link config fails, make sure ptt is released */
3053 			goto exit;
3054 		}
3055 	}
3056 exit:
3057 	return (ret);
3058 }
3059 
3060 /*
3061  * drv_lock must be held by the caller.
3062  */
3063 int
3064 qede_stop(qede_t *qede)
3065 {
3066 	int status;
3067 
3068 	ASSERT(mutex_owned(&qede->drv_lock));
3069 	qede->qede_state = QEDE_STATE_STOPPING;
3070 
3071 	mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3072 
3073 	qede_disable_all_fastpath_intrs(qede);
3074 	status = qede_configure_link(qede, false /* Re-Set */);
3075 	if (status) {
3076 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3077 		cmn_err(CE_NOTE, "!%s(%d): Failed to reset link",
3078 		    __func__, qede->instance);
3079 		return (status);
3080 	}
3081 	qede_clear_filters(qede);
3082 	status = qede_fastpath_stop_queues(qede);
3083 	if (status != DDI_SUCCESS) {
3084 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3085 		cmn_err(CE_WARN, "qede_stop:"
3086 		    " qede_fastpath_stop_queues FAILED "
3087 		    " qede=%p\n",
3088 		    qede);
3089 		return (status);
3090 	}
3091 
3092 	qede_fastpath_free_phys_mem(qede);
3093 
3094 	qede->qede_state = QEDE_STATE_STOPPED;
3095 	/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3096 	cmn_err(CE_WARN, "qede_stop SUCCESS =%p\n", qede);
3097 	return (DDI_SUCCESS);
3098 }
3099 
3100 /*
3101  * drv_lock must be held by the caller.
3102  */
3103 int
3104 qede_start(qede_t *qede)
3105 {
3106 	int status;
3107 
3108 	ASSERT(mutex_owned(&qede->drv_lock));
3109 
3110 	qede->qede_state = QEDE_STATE_STARTING;
3111 
3112 	mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3113 
3114 	/*
3115 	 * Configure the fastpath blocks with
3116 	 * the sb_info, rx_ring and tx_rings
3117 	 */
3118 	if (qede_fastpath_config(qede) != DDI_SUCCESS) {
3119 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3120 		qede_print_err("!%s(%d): qede_fastpath_config failed",
3121 		    __func__, qede->instance);
3122 		return (DDI_FAILURE);
3123 	}
3124 
3125 
3126 	/*
3127 	 * Allocate the physical memory
3128 	 * for fastpath.
3129 	 */
3130 	status = qede_fastpath_alloc_phys_mem(qede);
3131 	if (status) {
3132 		cmn_err(CE_NOTE, "fastpath_alloc_phys_mem "
3133 		    " failed qede=%p\n", qede);
3134 		return (DDI_FAILURE);
3135 	}
3136 
3137 	status = qede_fastpath_start_queues(qede);
3138 	if (status) {
3139 		cmn_err(CE_NOTE, "fp_start_queues "
3140 		    " failed qede=%p\n", qede);
3141 		goto err_out1;
3142 	}
3143 
3144 	cmn_err(CE_NOTE, "qede_start fp_start_queues qede=%p\n", qede);
3145 
3146 	status = qede_configure_link(qede, true /* Set */);
3147 	if (status) {
3148 		cmn_err(CE_NOTE, "!%s(%d): Failed to configure link",
3149 		    __func__, qede->instance);
3150 		goto err_out1;
3151 	}
3152 
3153 	/*
3154 	 * Put interface in regular mode
3155 	 */
3156 	if (qede_set_filter_rx_mode(qede,
3157 		QEDE_FILTER_RX_MODE_REGULAR) != DDI_SUCCESS) {
3158 		cmn_err(CE_NOTE, "!%s(%d): Failed to set filter mode",
3159 		    __func__, qede->instance);
3160 		goto err_out1;
3161 	}
3162 
3163 	status = qede_enable_all_fastpath_intrs(qede);
3164 	if (status) {
3165 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3166 		cmn_err(CE_NOTE, "!%s(%d): Failed to enable intrs",
3167 		    __func__, qede->instance);
3168 		goto err_out2;
3169 	}
3170 	qede->qede_state = QEDE_STATE_STARTED;
3171 	cmn_err(CE_NOTE, "!%s(%d): SUCCESS",
3172 		    __func__, qede->instance);
3173 
3174 	return (status);
3175 
3176 err_out2:
3177 	(void) qede_fastpath_stop_queues(qede);
3178 err_out1:
3179 	qede_fastpath_free_phys_mem(qede);
3180 	return (DDI_FAILURE);
3181 }
3182 
3183 static void
3184 qede_free_attach_resources(qede_t *qede)
3185 {
3186 	struct ecore_dev *edev;
3187 	int status;
3188 
3189 	edev = &qede->edev;
3190 
3191 	if (qede->attach_resources & QEDE_ECORE_HW_INIT) {
3192 		if (ecore_hw_stop(edev) != ECORE_SUCCESS) {
3193 			cmn_err(CE_NOTE, "%s(%d): ecore_hw_stop: failed\n",
3194 			    __func__, qede->instance);
3195 		}
3196 		qede->attach_resources &= ~QEDE_ECORE_HW_INIT;
3197 	}
3198 
3199 	if (qede->attach_resources & QEDE_SP_INTR_ENBL) {
3200 		status = qede_disable_slowpath_intrs(qede);
3201 		if (status != DDI_SUCCESS) {
3202 			qede_print("%s(%d): qede_disable_slowpath_intrs Failed",
3203 			    __func__, qede->instance);
3204 		}
3205 		qede->attach_resources &= ~QEDE_SP_INTR_ENBL;
3206 	}
3207 	if (qede->attach_resources & QEDE_KSTAT_INIT) {
3208 		qede_kstat_fini(qede);
3209 		qede->attach_resources &= ~QEDE_KSTAT_INIT;
3210 	}
3211 
3212 
3213 	if (qede->attach_resources & QEDE_GLD_INIT) {
3214 		status = mac_unregister(qede->mac_handle);
3215 		if (status != 0) {
3216 			qede_print("%s(%d): mac_unregister Failed",
3217 			    __func__, qede->instance);
3218 		}
3219 		qede->attach_resources &= ~QEDE_GLD_INIT;
3220 	}
3221 
3222 	if (qede->attach_resources & QEDE_EDEV_CONFIG) {
3223 		ecore_resc_free(edev);
3224 		qede->attach_resources &= ~QEDE_EDEV_CONFIG;
3225 	}
3226 
3227 	if (qede->attach_resources & QEDE_INTR_CONFIG) {
3228 		qede_unconfig_intrs(qede);
3229 		qede->attach_resources &= ~QEDE_INTR_CONFIG;
3230 	}
3231 
3232 	if (qede->attach_resources & QEDE_INTR_ALLOC) {
3233 		qede_free_intrs(qede);
3234 		qede->attach_resources &= ~QEDE_INTR_ALLOC;
3235 	}
3236 
3237 	if (qede->attach_resources & QEDE_INIT_LOCKS) {
3238 		qede_destroy_locks(qede);
3239 		qede->attach_resources &= ~QEDE_INIT_LOCKS;
3240 	}
3241 
3242 	if (qede->attach_resources & QEDE_IO_STRUCT_ALLOC) {
3243 		qede_free_io_structs(qede);
3244 		qede->attach_resources &= ~QEDE_IO_STRUCT_ALLOC;
3245 	}
3246 #ifdef QEDE_LSR
3247 	if (qede->attach_resources & QEDE_CALLBACK) {
3248 		status = ddi_cb_unregister(qede->callback_hdl);
3249 		if (status != DDI_SUCCESS) {
3250 			qede_print("%s(%d): ddi_cb_unregister Failed",
3251 			    __func__, qede->instance);
3252 		}
3253 		qede->attach_resources &= ~QEDE_CALLBACK;
3254 	}
3255 #endif
3256 	if (qede->attach_resources & QEDE_ECORE_HW_PREP) {
3257 		ecore_hw_remove(edev);
3258 		qede->attach_resources &= ~QEDE_ECORE_HW_PREP;
3259 	}
3260 
3261 	if (qede->attach_resources & QEDE_PCI) {
3262 		qede_unconfig_pci(qede);
3263 		qede->attach_resources &= ~QEDE_PCI;
3264 	}
3265 
3266 	if (qede->attach_resources & QEDE_FM) {
3267 		qede_unconfig_fm(qede);
3268 		qede->attach_resources &= ~QEDE_FM;
3269 	}
3270 
3271 	/*
3272 	 * Check for possible mem. left behind by ecore
3273 	 */
3274 	(void) qede_osal_cleanup(qede);
3275 
3276 	if (qede->attach_resources & QEDE_STRUCT_ALLOC) {
3277 		ddi_set_driver_private(qede->dip, NULL);
3278 		qede->attach_resources &= ~QEDE_STRUCT_ALLOC;
3279 		kmem_free(qede, sizeof (qede_t));
3280 	}
3281 }
3282 
3283 /*
3284  * drv_lock must be held by the caller.
3285  */
3286 static int
3287 qede_suspend(qede_t *qede)
3288 {
3289 	// STUB
3290 	ASSERT(mutex_owned(&qede->drv_lock));
3291 	printf("in qede_suspend\n");
3292 	return (DDI_FAILURE);
3293 }
3294 
3295 static int
3296 qede_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3297 {
3298     	qede_t *qede;
3299 	struct ecore_dev *edev;
3300 	int instance;
3301 	uint32_t vendor_id;
3302 	uint32_t device_id;
3303 	struct ecore_hwfn *p_hwfn;
3304 	struct ecore_ptt *p_ptt;
3305 	struct ecore_mcp_link_params *link_params;
3306 	struct ecore_hw_init_params hw_init_params;
3307 	struct ecore_drv_load_params load_params;
3308 	int *props;
3309        	uint32_t num_props;
3310 	int rc = 0;
3311 
3312     	switch (cmd) {
3313     	default:
3314        		return (DDI_FAILURE);
3315 
3316 	case DDI_RESUME:
3317 	{
3318 		qede = (qede_t *)ddi_get_driver_private(dip);
3319 		if (qede == NULL || qede->dip != dip) {
3320 			cmn_err(CE_NOTE, "qede:%s: Could not retrieve"
3321 			    " adapter structure\n", __func__);
3322 			return (DDI_FAILURE);
3323 		}
3324 
3325 		mutex_enter(&qede->drv_lock);
3326 		if (qede->qede_state != QEDE_STATE_SUSPENDED) {
3327 			mutex_exit(&qede->drv_lock);
3328         		return (DDI_FAILURE);
3329 		}
3330 
3331 		if (qede_resume(qede) != DDI_SUCCESS) {
3332 			cmn_err(CE_NOTE, "%s:%d resume operation failure\n",
3333 			    __func__, qede->instance);
3334 			mutex_exit(&qede->drv_lock);
3335             		return (DDI_FAILURE);
3336         	}
3337 
3338 		qede->qede_state = QEDE_STATE_ATTACHED;
3339 		mutex_exit(&qede->drv_lock);
3340         	return (DDI_SUCCESS);
3341 	}
3342 	case DDI_ATTACH:
3343 	{
3344     		instance = ddi_get_instance(dip);
3345 	    	cmn_err(CE_NOTE, "qede_attach(%d): Enter",
3346 		    instance);
3347 
3348     		/* Allocate main structure rounded up to cache line size */
3349     		if ((qede = kmem_zalloc(sizeof (qede_t), KM_SLEEP)) == NULL) {
3350 			cmn_err(CE_NOTE, "!%s(%d): Could not allocate adapter "
3351 			    "structure\n", __func__, instance);
3352         		return (DDI_FAILURE);
3353     		}
3354 
3355 		qede->attach_resources |= QEDE_STRUCT_ALLOC;
3356     		ddi_set_driver_private(dip, qede);
3357 		qede->dip = dip;
3358    		qede->instance = instance;
3359     		snprintf(qede->name, sizeof (qede->name), "qede%d", instance);
3360 		edev = &qede->edev;
3361 
3362 		if (qede_config_fm(qede) != DDI_SUCCESS) {
3363         		goto exit_with_err;
3364 		}
3365 		qede->attach_resources |= QEDE_FM;
3366 
3367 		/*
3368 		 * Do PCI config setup and map the register
3369 		 * and doorbell space */
3370 		if (qede_config_pci(qede) != DDI_SUCCESS) {
3371         		goto exit_with_err;
3372 		}
3373 		qede->attach_resources |= QEDE_PCI;
3374 
3375 		/*
3376 		 * Setup OSAL mem alloc related locks.
3377 		 * Do not call any ecore functions without
3378 		 * initializing these locks
3379 		 */
3380 		mutex_init(&qede->mem_list.mem_list_lock, NULL,
3381 		    MUTEX_DRIVER, 0);
3382 		mutex_init(&qede->phys_mem_list.lock, NULL,
3383 		    MUTEX_DRIVER, 0);
3384 		QEDE_INIT_LIST_HEAD(&qede->mem_list.mem_list_head);
3385 		QEDE_INIT_LIST_HEAD(&qede->phys_mem_list.head);
3386 		QEDE_INIT_LIST_HEAD(&qede->mclist.head);
3387 
3388 
3389 		/*
3390 		 * FIXME: this function calls ecore api, but
3391 		 * dp_level and module are not yet set
3392 		 */
3393 		if (qede_prepare_edev(qede) != ECORE_SUCCESS) {
3394 			// report fma
3395         		goto exit_with_err;
3396 		}
3397 
3398 		qede->num_hwfns = edev->num_hwfns;
3399 		qede->num_tc = 1;
3400 		memcpy(qede->ether_addr, edev->hwfns->hw_info.hw_mac_addr,
3401 		    ETHERADDRL);
3402 		qede_info(qede, "Interface mac_addr : " MAC_STRING,
3403 		    MACTOSTR(qede->ether_addr));
3404 		qede->attach_resources |= QEDE_ECORE_HW_PREP;
3405 
3406 		if (qede_set_operating_params(qede) != DDI_SUCCESS) {
3407         		goto exit_with_err;
3408 		}
3409 		qede->attach_resources |= QEDE_SET_PARAMS;
3410 #ifdef QEDE_LSR
3411 		if (ddi_cb_register(qede->dip,
3412 	    	    qede->callback_flags,
3413 	    	    qede_callback,
3414 		    qede,
3415 	    	    NULL,
3416 	    	    &qede->callback_hdl)) {
3417 			goto exit_with_err;
3418 		}
3419 		qede->attach_resources |= QEDE_CALLBACK;
3420 #endif
3421 		qede_cfg_reset(qede);
3422 
3423 		if (qede_alloc_intrs(qede)) {
3424 			cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3425 			    __func__);
3426         		goto exit_with_err;
3427 		}
3428 
3429 		qede->attach_resources |= QEDE_INTR_ALLOC;
3430 
3431 		if (qede_config_intrs(qede)) {
3432 			cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3433 			    __func__);
3434         		goto exit_with_err;
3435 		}
3436 		qede->attach_resources |= QEDE_INTR_CONFIG;
3437 
3438     		if (qede_alloc_io_structs(qede) != DDI_SUCCESS) {
3439 			cmn_err(CE_NOTE, "%s: Could not allocate data"
3440 			    " path structures\n", __func__);
3441         		goto exit_with_err;
3442     		}
3443 
3444 		qede->attach_resources |= QEDE_IO_STRUCT_ALLOC;
3445 
3446 		/* Lock init cannot fail */
3447 		qede_init_locks(qede);
3448 		qede->attach_resources |= QEDE_INIT_LOCKS;
3449 
3450 
3451 		if (qede_config_edev(qede)) {
3452 			cmn_err(CE_NOTE, "%s: Could not configure ecore \n",
3453 			    __func__);
3454 			goto exit_with_err;
3455 		}
3456 		qede->attach_resources |= QEDE_EDEV_CONFIG;
3457 
3458 		if (qede_kstat_init(qede) == B_FALSE) {
3459 			cmn_err(CE_NOTE, "%s: Could not initialize kstat \n",
3460 			    __func__);
3461 			goto exit_with_err;
3462 
3463 		}
3464 		qede->attach_resources |= QEDE_KSTAT_INIT;
3465 
3466 		if (qede_gld_init(qede) == B_FALSE) {
3467 			cmn_err(CE_NOTE, "%s: Failed call to qede_gld_init",
3468 			    __func__);
3469 			goto exit_with_err;
3470 		}
3471 
3472 		qede->attach_resources |= QEDE_GLD_INIT;
3473 
3474 		if (qede_enable_slowpath_intrs(qede)) {
3475 			cmn_err(CE_NOTE, "%s: Could not enable interrupts\n",
3476 			    __func__);
3477 			goto exit_with_err;
3478 		}
3479 
3480 		qede->attach_resources |= QEDE_SP_INTR_ENBL;
3481 
3482 		cmn_err(CE_NOTE, "qede->attach_resources = %x\n",
3483 		    qede->attach_resources);
3484 
3485 		memset((void *)&hw_init_params, 0,
3486 		    sizeof (struct ecore_hw_init_params));
3487 		hw_init_params.p_drv_load_params = &load_params;
3488 
3489 		hw_init_params.p_tunn = NULL;
3490 		hw_init_params.b_hw_start = true;
3491 		hw_init_params.int_mode = qede->intr_ctx.intr_mode;
3492 		hw_init_params.allow_npar_tx_switch = false;
3493 		hw_init_params.bin_fw_data = NULL;
3494 		load_params.is_crash_kernel = false;
3495 		load_params.mfw_timeout_val = 0;
3496 		load_params.avoid_eng_reset = false;
3497 		load_params.override_force_load =
3498 		    ECORE_OVERRIDE_FORCE_LOAD_NONE;
3499 
3500 		if (ecore_hw_init(edev, &hw_init_params) != ECORE_SUCCESS) {
3501 			cmn_err(CE_NOTE,
3502 			    "%s: Could not initialze ecore block\n",
3503 			     __func__);
3504 			goto exit_with_err;
3505 		}
3506 		qede->attach_resources |= QEDE_ECORE_HW_INIT;
3507 		qede->qede_state = QEDE_STATE_ATTACHED;
3508 
3509 		qede->detach_unsafe = 0;
3510 
3511 		snprintf(qede->version,
3512              		sizeof (qede->version),
3513              		"%d.%d.%d",
3514              		MAJVERSION,
3515              		MINVERSION,
3516              		REVVERSION);
3517 
3518 		snprintf(qede->versionFW,
3519              		sizeof (qede->versionFW),
3520              		"%d.%d.%d.%d",
3521              		FW_MAJOR_VERSION,
3522              		FW_MINOR_VERSION,
3523              		FW_REVISION_VERSION,
3524              		FW_ENGINEERING_VERSION);
3525 
3526 		p_hwfn = &qede->edev.hwfns[0];
3527 		p_ptt = ecore_ptt_acquire(p_hwfn);
3528 		/*
3529 		 * (test) : saving the default link_input params
3530 		 */
3531 		link_params = ecore_mcp_get_link_params(p_hwfn);
3532 		memset(&qede->link_input_params, 0,
3533 		    sizeof (qede_link_input_params_t));
3534 		memcpy(&qede->link_input_params.default_link_params,
3535 		    link_params,
3536 		    sizeof (struct ecore_mcp_link_params));
3537 
3538 		p_hwfn = ECORE_LEADING_HWFN(edev);
3539         	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &qede->mfw_ver, NULL);
3540 
3541 		ecore_ptt_release(p_hwfn, p_ptt);
3542 
3543 		snprintf(qede->versionMFW,
3544 			sizeof (qede->versionMFW),
3545 			"%d.%d.%d.%d",
3546 			(qede->mfw_ver >> 24) & 0xFF,
3547 	        	(qede->mfw_ver >> 16) & 0xFF,
3548 			(qede->mfw_ver >> 8) & 0xFF,
3549 			qede->mfw_ver & 0xFF);
3550 
3551 		snprintf(qede->chip_name,
3552              		sizeof (qede->chip_name),
3553 			"%s",
3554 			ECORE_IS_BB(edev) ? "BB" : "AH");
3555 
3556 	   	snprintf(qede->chipID,
3557 			sizeof (qede->chipID),
3558              		"0x%x",
3559              		qede->edev.chip_num);
3560 
3561 		*qede->bus_dev_func = 0;
3562 		vendor_id = 0;
3563 		device_id = 0;
3564 
3565 
3566 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3567 					0, "reg", &props, &num_props);
3568 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3569 			snprintf(qede->bus_dev_func,
3570 			    sizeof (qede->bus_dev_func),
3571 			    "%04x:%02x:%02x",
3572 			    PCI_REG_BUS_G(props[0]),
3573 			    PCI_REG_DEV_G(props[0]),
3574 			    PCI_REG_FUNC_G(props[0]));
3575 
3576 			/*
3577 			 * This information is used
3578 			 * in the QEDE_FUNC_INFO ioctl.
3579 			 */
3580 			qede->pci_func = (uint8_t)PCI_REG_FUNC_G(props[0]);
3581 
3582 			ddi_prop_free(props);
3583 		}
3586 
3587 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3588 					0, "vendor-id", &props, &num_props);
3589 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3590 			vendor_id = props[0];
3591 			ddi_prop_free(props);
3592 		}
3593 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3594 					0, "device-id", &props, &num_props);
3595 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3596 			device_id = props[0];
3597 			ddi_prop_free(props);
3598 		}
3599 
3600 
3601 		snprintf(qede->vendor_device,
3602 			sizeof (qede->vendor_device),
3603 			"%04x:%04x",
3604 			vendor_id,
3605 			device_id);
3606 
3607 
3608 		snprintf(qede->intrAlloc,
3609 			sizeof (qede->intrAlloc), "%d %s",
3610 			(qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_FIXED)
3611  			? 1 :
3612 			qede->intr_ctx.intr_vect_allocated,
3613 			(qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSIX)
3614 			? "MSIX" :
3615 			(qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSI)
3616 			? "MSI"  : "Fixed");
3617 
3618 	        qede_print("%s(%d): success, addr %p chip %s id %s intr %s\n",
3619 		    __func__, qede->instance, qede, qede->chip_name,
3620 		    qede->vendor_device, qede->intrAlloc);
3621 
3622 	        qede_print("%s(%d): version %s FW %s MFW %s\n",
3623 		    __func__, qede->instance, qede->version,
3624 		    qede->versionFW, qede->versionMFW);
3625 
3626 		return (DDI_SUCCESS);
3627 	}
3628 	}
3629 exit_with_err:
3630 	cmn_err(CE_WARN, "%s:%d   failed %x\n", __func__, qede->instance,
3631 	    qede->attach_resources);
3632 	(void)qede_free_attach_resources(qede);
3633 	return (DDI_FAILURE);
3634 }
3635 
3636 static int
3637 qede_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3638 {
3639 
3640 	qede_t *qede;
3641 	int status;
3642 	uint32_t count = 0;
3643 
3644 	qede = (qede_t *)ddi_get_driver_private(dip);
3645 	if ((qede == NULL) || (qede->dip != dip)) {
3646 		return (DDI_FAILURE);
3647 	}
3648 
3649 	switch (cmd) {
3650 	default:
3651 		return (DDI_FAILURE);
3652 	case DDI_SUSPEND:
3653 		mutex_enter(&qede->drv_lock);
3654 		status = qede_suspend(qede);
3655 		if (status != DDI_SUCCESS) {
3656 			mutex_exit(&qede->drv_lock);
3657 			return (DDI_FAILURE);
3658 		}
3659 
3660 		qede->qede_state = QEDE_STATE_SUSPENDED;
3661 		mutex_exit(&qede->drv_lock);
3662 		return (DDI_SUCCESS);
3663 
3664 	case DDI_DETACH:
3665 		mutex_enter(&qede->drv_lock);
3666 		if (qede->qede_state == QEDE_STATE_STARTED) {
3667 			qede->plumbed = 0;
3668 			status = qede_stop(qede);
3669 			if (status != DDI_SUCCESS) {
3670 				qede->qede_state = QEDE_STATE_FAILED;
3671 				mutex_exit(&qede->drv_lock);
3672 				return (DDI_FAILURE);
3673 			}
3674 		}
3675 		mutex_exit(&qede->drv_lock);
3676                 if (qede->detach_unsafe) {
3677                         /*
3678                          * wait for rx buffers to be returned from
3679                          * upper layers
3680                          */
3681                         count = 0;
3682                         while ((qede->detach_unsafe) && (count < 100)) {
3683                                 qede_delay(100);
3684                                 count++;
3685                         }
3686                         if (qede->detach_unsafe) {
3687                                 qede_info(qede, "!%s(%d) : Buffers still with"
3688                                     " OS, failing detach\n",
3689                                     qede->name, qede->instance);
3690                                 return (DDI_FAILURE);
3691                         }
3692                 }
3693 		qede_free_attach_resources(qede);
3694 		return (DDI_SUCCESS);
3695 	}
3696 }
3697 
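/*
 * quiesce(9E) entry point, used for fast reboot. The device must
 * be stopped without relying on locks or interrupts, so this is
 * done by kicking the management firmware's recovery process and
 * allowing it time to complete.
 */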
3698 static int
3699 /* LINTED E_FUNC_ARG_UNUSED */
3700 qede_quiesce(dev_info_t *dip)
3701 {
3702 	qede_t *qede = (qede_t *)ddi_get_driver_private(dip);
3703 	struct ecore_dev *edev = &qede->edev;
3704 	int status = DDI_SUCCESS;
3705 	struct ecore_hwfn *p_hwfn;
3706 	struct ecore_ptt *p_ptt = NULL;
3707 
3708 	mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3709 	p_hwfn = ECORE_LEADING_HWFN(edev);
3710 	p_ptt = ecore_ptt_acquire(p_hwfn);
3711 	if (p_ptt) {
3712 		status = ecore_start_recovery_process(p_hwfn, p_ptt);
3713 		ecore_ptt_release(p_hwfn, p_ptt);
3714 		OSAL_MSLEEP(5000);
3715 	}
3716 	return (status);
3717 
3718 }
3719 
3720 
3721 DDI_DEFINE_STREAM_OPS(qede_dev_ops, nulldev, nulldev, qede_attach, qede_detach,
3722     nodev, NULL, D_MP, NULL, qede_quiesce);
3723 
3724 static struct modldrv qede_modldrv =
3725 {
3726     &mod_driverops,    /* drv_modops (must be mod_driverops for drivers) */
3727     QEDE_PRODUCT_INFO, /* drv_linkinfo (string displayed by modinfo) */
3728     &qede_dev_ops      /* drv_dev_ops */
3729 };
3730 
3731 
3732 static struct modlinkage qede_modlinkage =
3733 {
3734     MODREV_1,        /* ml_rev */
3735     (&qede_modldrv), /* ml_linkage */
3736     NULL           /* NULL termination */
3737 };
3738 
3739 int
3740 _init(void)
3741 {
3742     int rc;
3743 
3744     qede_dev_ops.devo_cb_ops->cb_str = NULL;
3745     mac_init_ops(&qede_dev_ops, "qede");
3746 
3747     /* Install module information with O/S */
3748     if ((rc = mod_install(&qede_modlinkage)) != DDI_SUCCESS) {
3749         mac_fini_ops(&qede_dev_ops);
3750 	cmn_err(CE_NOTE, "mod_install failed");
3751         return (rc);
3752     }
3753 
3754     return (rc);
3755 }
3756 
3757 
3758 int
3759 _fini(void)
3760 {
3761     int rc;
3762 
3763     if ((rc = mod_remove(&qede_modlinkage)) == DDI_SUCCESS) {
3764         mac_fini_ops(&qede_dev_ops);
3765     }
3766 
3767     return (rc);
3768 }
3769 
3770 
3771 int
3772 _info(struct modinfo *modinfop)
3773 {
3774     return (mod_info(&qede_modlinkage, modinfop));
3775 }
3776