1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * Copyright 2025 Oxide Computer Company
25 */
26
27
28 #include "qede.h"
29
30 ddi_device_acc_attr_t qede_regs_acc_attr = {
31 DDI_DEVICE_ATTR_V1, // devacc_attr_version;
32 DDI_STRUCTURE_LE_ACC, // devacc_attr_endian_flags;
33 DDI_STRICTORDER_ACC, // devacc_attr_dataorder;
34 DDI_FLAGERR_ACC // devacc_attr_access;
35 };
36
37 ddi_device_acc_attr_t qede_desc_acc_attr = {
38 DDI_DEVICE_ATTR_V0, // devacc_attr_version;
39 DDI_STRUCTURE_LE_ACC, // devacc_attr_endian_flags;
40 DDI_STRICTORDER_ACC // devacc_attr_dataorder;
41 };
42
43 /*
44 * DMA access attributes for BUFFERS.
45 */
46 ddi_device_acc_attr_t qede_buf_acc_attr =
47 {
48 DDI_DEVICE_ATTR_V0, // devacc_attr_version;
49 DDI_NEVERSWAP_ACC, // devacc_attr_endian_flags;
50 DDI_STRICTORDER_ACC // devacc_attr_dataorder;
51 };
52
53
54 ddi_dma_attr_t qede_desc_dma_attr =
55 {
56 DMA_ATTR_V0,
57 0x0000000000000000ull,
58 0xFFFFFFFFFFFFFFFFull,
59 0x00000000FFFFFFFFull,
60 QEDE_PAGE_ALIGNMENT,
61 0x00000FFF,
62 0x00000001,
63 0x00000000FFFFFFFFull,
64 0xFFFFFFFFFFFFFFFFull,
65 1,
66 0x00000001,
67 DDI_DMA_FLAGERR
68 };
69
70 ddi_dma_attr_t qede_gen_buf_dma_attr =
71 {
72 DMA_ATTR_V0,
73 0x0000000000000000ull,
74 0xFFFFFFFFFFFFFFFFull,
75 0x00000000FFFFFFFFull,
76 QEDE_PAGE_ALIGNMENT,
77 0x00000FFF,
78 0x00000001,
79 0x00000000FFFFFFFFull,
80 0xFFFFFFFFFFFFFFFFull,
81 1,
82 0x00000001,
83 DDI_DMA_FLAGERR
84 };
85
86 /*
87 * DMA attributes for transmit.
88 */
89 ddi_dma_attr_t qede_tx_buf_dma_attr =
90 {
91 DMA_ATTR_V0,
92 0x0000000000000000ull,
93 0xFFFFFFFFFFFFFFFFull,
94 0x00000000FFFFFFFFull,
95 1,
96 0x00000FFF,
97 0x00000001,
98 0x00000000FFFFFFFFull,
99 0xFFFFFFFFFFFFFFFFull,
100 ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1,
101 0x00000001,
102 DDI_DMA_FLAGERR
103 };
104
105
106 ddi_dma_attr_t qede_dma_attr_desc =
107 {
108 DMA_ATTR_V0, /* dma_attr_version */
109 0, /* dma_attr_addr_lo */
110 0xffffffffffffffffull, /* dma_attr_addr_hi */
111 0x000fffffull, /* dma_attr_count_max */
112 4096, /* dma_attr_align */
113 0x000fffffull, /* dma_attr_burstsizes */
114 4, /* dma_attr_minxfer */
115 0xffffffffull, /* dma_attr_maxxfer */
116 0xffffffffull, /* dma_attr_seg */
117 1, /* dma_attr_sgllen */
118 1, /* dma_attr_granular */
119 DDI_DMA_FLAGERR /* dma_attr_flags */
120 };
121
122 static ddi_dma_attr_t qede_dma_attr_txbuf =
123 {
124 DMA_ATTR_V0, /* dma_attr_version */
125 0, /* dma_attr_addr_lo */
126 0xffffffffffffffffull, /* dma_attr_addr_hi */
127 0x00000000FFFFFFFFull, /* dma_attr_count_max */
128 QEDE_PAGE_ALIGNMENT, /* dma_attr_align */
129 0xfff8ull, /* dma_attr_burstsizes */
130 1, /* dma_attr_minxfer */
131 0xffffffffull, /* dma_attr_maxxfer */
132 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
133 1, /* dma_attr_sgllen */
134 1, /* dma_attr_granular */
135 0 /* dma_attr_flags */
136 };
137
138 ddi_dma_attr_t qede_dma_attr_rxbuf =
139 {
140 DMA_ATTR_V0, /* dma_attr_version */
141 0, /* dma_attr_addr_lo */
142 0xffffffffffffffffull, /* dma_attr_addr_hi */
143 0x00000000FFFFFFFFull, /* dma counter max */
144 QEDE_PAGE_ALIGNMENT, /* dma_attr_align */
145 0xfff8ull, /* dma_attr_burstsizes */
146 1, /* dma_attr_minxfer */
147 0xffffffffull, /* dma_attr_maxxfer */
148 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
149 1, /* dma_attr_sgllen */
150 1, /* dma_attr_granular */
151 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
152 };
153
154 /* LINTED E_STATIC_UNUSED */
155 static ddi_dma_attr_t qede_dma_attr_cmddesc =
156 {
157 DMA_ATTR_V0, /* dma_attr_version */
158 0, /* dma_attr_addr_lo */
159 0xffffffffffffffffull, /* dma_attr_addr_hi */
160 0xffffffffull, /* dma_attr_count_max */
161 1, /* dma_attr_align */
162 0xfff8ull, /* dma_attr_burstsizes */
163 1, /* dma_attr_minxfer */
164 0xffffffff, /* dma_attr_maxxfer */
165 0xffffffff, /* dma_attr_seg */
166 ETH_TX_MAX_BDS_PER_NON_LSO_PACKET, /* dma_attr_sgllen */
167 1, /* dma_attr_granular */
168 0 /* dma_attr_flags */
169 };
170
171
172
173 /*
174 * Generic dma attribute for single sg
175 */
176 /* LINTED E_STATIC_UNUSED */
177 static ddi_dma_attr_t qede_gen_dma_attr_desc =
178 {
179 DMA_ATTR_V0, /* dma_attr_version */
180 0, /* dma_attr_addr_lo */
181 0xffffffffffffffffull, /* dma_attr_addr_hi */
182 0x000fffffull, /* dma_attr_count_max */
183 4096, /* dma_attr_align */
184 0x000fffffull, /* dma_attr_burstsizes */
185 4, /* dma_attr_minxfer */
186 0xffffffffull, /* dma_attr_maxxfer */
187 0xffffffffull, /* dma_attr_seg */
188 1, /* dma_attr_sgllen */
189 1, /* dma_attr_granular */
190 DDI_DMA_FLAGERR /* dma_attr_flags */
191 };
192
193 ddi_dma_attr_t qede_buf2k_dma_attr_txbuf =
194 {
195 DMA_ATTR_V0, /* dma_attr_version */
196 0, /* dma_attr_addr_lo */
197 0xffffffffffffffffull, /* dma_attr_addr_hi */
198 0x00000000FFFFFFFFull, /* dma_attr_count_max */
199 BUF_2K_ALIGNMENT, /* dma_attr_align */
200 0xfff8ull, /* dma_attr_burstsizes */
201 1, /* dma_attr_minxfer */
202 0xffffffffull, /* dma_attr_maxxfer */
203 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
204 1, /* dma_attr_sgllen */
205 0x00000001, /* dma_attr_granular */
206 0 /* dma_attr_flags */
207 };
208
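/*
 * Return the symbolic name of a DDI error code for use in log messages.
 */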
209 char *
qede_get_ddi_fail(int status)
211 {
212 switch (status) {
213 case DDI_FAILURE:
214 return ("DDI_FAILURE");
215 case DDI_NOT_WELL_FORMED:
216 return ("DDI_NOT_WELL_FORMED");
217 case DDI_EAGAIN:
218 return ("DDI_EAGAIN");
219 case DDI_EINVAL:
220 return ("DDI_EINVAL");
221 case DDI_ENOTSUP:
222 return ("DDI_ENOTSUP");
223 case DDI_EPENDING:
224 return ("DDI_EPENDING");
225 case DDI_EALREADY:
226 return ("DDI_EALREADY");
227 case DDI_ENOMEM:
228 return ("DDI_ENOMEM");
229 case DDI_EBUSY:
230 return ("DDI_EBUSY");
231 case DDI_ETRANSPORT:
232 return ("DDI_ETRANSPORT");
233 case DDI_ECONTEXT:
234 return ("DDI_ECONTEXT");
235 default:
236 return ("ERROR CODE NOT FOUND!");
237 }
238 }
239
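/*
 * Return the symbolic name of an ecore status code for use in log messages.
 */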
240 char *
qede_get_ecore_fail(int status)
242 {
243 switch (status) {
244 case ECORE_UNKNOWN_ERROR:
245 return ("ECORE_UNKNOWN_ERROR");
246 case ECORE_NORESOURCES:
247 return ("ECORE_NORESOURCES");
248 case ECORE_NODEV:
249 return ("ECORE_NODEV");
250 case ECORE_ABORTED:
251 return ("ECORE_ABORTED");
252 case ECORE_AGAIN:
253 return ("ECORE_AGAIN");
254 case ECORE_NOTIMPL:
255 return ("ECORE_NOTIMPL");
256 case ECORE_EXISTS:
257 return ("ECORE_EXISTS");
258 case ECORE_IO:
259 return ("ECORE_IO");
260 case ECORE_TIMEOUT:
261 return ("ECORE_TIMEOUT");
262 case ECORE_INVAL:
263 return ("ECORE_INVAL");
264 case ECORE_BUSY:
265 return ("ECORE_BUSY");
266 case ECORE_NOMEM:
267 return ("ECORE_NOMEM");
268 case ECORE_SUCCESS:
269 return ("ECORE_SUCCESS");
270 case ECORE_PENDING:
271 return ("ECORE_PENDING");
272 default:
273 return ("ECORE ERROR CODE NOT FOUND!");
274 }
275 }
276
277 #define QEDE_CHIP_NUM(_p)\
278 (((_p)->edev.chip_num) & 0xffff)
279
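/*
 * Map the chip number reported by ecore to a printable device name.
 */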
280 char *
qede_chip_name(qede_t *qede)
282 {
283 switch (QEDE_CHIP_NUM(qede)) {
284 case 0x1634:
285 return ("BCM57980E");
286
287 case 0x1629:
288 return ("BCM57980S");
289
290 case 0x1630:
291 return ("BCM57940_KR2");
292
293 case 0x8070:
294 return ("ARROWHEAD");
295
296 case 0x8071:
297 return ("ARROWHEAD");
298
299 case 0x8072:
300 return ("ARROWHEAD");
301
302 case 0x8073:
303 return ("ARROWHEAD");
304
305 default:
306 return ("UNKNOWN");
307 }
308 }
309
310
311
312
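/*
 * Destroy the per-instance, per-fastpath and per-ring mutexes.
 */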
313 static void
qede_destroy_locks(qede_t *qede)
315 {
316 qede_fastpath_t *fp = &qede->fp_array[0];
317 qede_rx_ring_t *rx_ring;
318 qede_tx_ring_t *tx_ring;
319 int i, j;
320
321 mutex_destroy(&qede->drv_lock);
322 mutex_destroy(&qede->watch_lock);
323
324 for (i = 0; i < qede->num_fp; i++, fp++) {
325 mutex_destroy(&fp->fp_lock);
326
327 rx_ring = fp->rx_ring;
328 mutex_destroy(&rx_ring->rx_lock);
329 mutex_destroy(&rx_ring->rx_replen_lock);
330
331 for (j = 0; j < qede->num_tc; j++) {
332 tx_ring = fp->tx_ring[j];
333 mutex_destroy(&tx_ring->tx_lock);
334 }
335 }
336 mutex_destroy(&qede->gld_lock);
337 mutex_destroy(&qede->kstat_lock);
338 }
339
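/*
 * Initialize the driver, fastpath, rx and tx ring mutexes at the
 * interrupt priority of the allocated vectors.
 */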
340 static void
qede_init_locks(qede_t *qede)
342 {
343 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
344 qede_fastpath_t *fp = &qede->fp_array[0];
345 qede_rx_ring_t *rx_ring;
346 qede_tx_ring_t *tx_ring;
347 int i, tc;
348
349 mutex_init(&qede->drv_lock, NULL,
350 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
351 mutex_init(&qede->watch_lock, NULL,
352 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
353
354 for (i = 0; i < qede->num_fp; i++, fp++) {
355 mutex_init(&fp->fp_lock, NULL,
356 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
357
358 rx_ring = fp->rx_ring;
359 mutex_init(&rx_ring->rx_lock, NULL,
360 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
361 mutex_init(&rx_ring->rx_replen_lock, NULL,
362 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
363
364 for (tc = 0; tc < qede->num_tc; tc++) {
365 tx_ring = fp->tx_ring[tc];
366 mutex_init(&tx_ring->tx_lock, NULL,
367 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
368 }
369 }
370
371 mutex_init(&qede->gld_lock, NULL,
372 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
373 mutex_init(&qede->kstat_lock, NULL,
374 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
375 }
376
377 /* LINTED E_FUNC_ARG_UNUSED */
static void qede_free_io_structs(qede_t *qede)
379 {
380 }
381
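/*
 * Wire each fastpath entry to its rx ring and per-traffic-class tx rings.
 */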
382 static int
qede_alloc_io_structs(qede_t *qede)
384 {
385 qede_fastpath_t *fp;
386 qede_rx_ring_t *rx_ring;
387 qede_tx_ring_t *tx_array, *tx_ring;
388 int i, tc;
389
390 /*
391 * Put rx ring + tx_ring pointers paired
392 * into the fp data structure array
393 */
394 for (i = 0; i < qede->num_fp; i++) {
395 fp = &qede->fp_array[i];
396 rx_ring = &qede->rx_array[i];
397
398 for (tc = 0; tc < qede->num_tc; tc++) {
399 tx_array = qede->tx_array[tc];
400 tx_ring = &tx_array[i];
401 fp->tx_ring[tc] = tx_ring;
402 }
403
404 fp->rx_ring = rx_ring;
405 rx_ring->group_index = 0;
406 }
407
408 return (DDI_SUCCESS);
409 }
410
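/*
 * Pull the qede.conf settings into the instance and log the resulting
 * configuration.
 */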
411 static int
qede_get_config_params(qede_t *qede)
413 {
414 struct ecore_dev *edev = &qede->edev;
415
416 qede_cfg_init(qede);
417
418 qede->num_tc = DEFAULT_TRFK_CLASS_COUNT;
419 qede->num_hwfns = edev->num_hwfns;
420 qede->rx_buf_count = qede->rx_ring_size;
421 qede->rx_buf_size = DEFAULT_RX_BUF_SIZE;
422 qede_print("!%s:%d: qede->num_fp = %d\n", __func__, qede->instance,
423 qede->num_fp);
424 qede_print("!%s:%d: qede->rx_ring_size = %d\n", __func__,
425 qede->instance, qede->rx_ring_size);
426 qede_print("!%s:%d: qede->rx_buf_count = %d\n", __func__,
427 qede->instance, qede->rx_buf_count);
428 qede_print("!%s:%d: qede->rx_buf_size = %d\n", __func__,
429 qede->instance, qede->rx_buf_size);
430 qede_print("!%s:%d: qede->rx_copy_threshold = %d\n", __func__,
431 qede->instance, qede->rx_copy_threshold);
432 qede_print("!%s:%d: qede->tx_ring_size = %d\n", __func__,
433 qede->instance, qede->tx_ring_size);
	qede_print("!%s:%d: qede->tx_bcopy_threshold = %d\n", __func__,
435 qede->instance, qede->tx_bcopy_threshold);
436 qede_print("!%s:%d: qede->lso_enable = %d\n", __func__,
437 qede->instance, qede->lso_enable);
438 qede_print("!%s:%d: qede->lro_enable = %d\n", __func__,
439 qede->instance, qede->lro_enable);
440 qede_print("!%s:%d: qede->jumbo_enable = %d\n", __func__,
441 qede->instance, qede->jumbo_enable);
442 qede_print("!%s:%d: qede->log_enable = %d\n", __func__,
443 qede->instance, qede->log_enable);
444 qede_print("!%s:%d: qede->checksum = %d\n", __func__,
445 qede->instance, qede->checksum);
446 qede_print("!%s:%d: qede->debug_level = 0x%x\n", __func__,
447 qede->instance, qede->ecore_debug_level);
448 qede_print("!%s:%d: qede->num_hwfns = %d\n", __func__,
	    qede->instance, qede->num_hwfns);
450
451 //qede->tx_buf_size = qede->mtu + QEDE_MAX_ETHER_HDR;
452 qede->tx_buf_size = BUF_2K_SIZE;
453 return (DDI_SUCCESS);
454 }
455
456 void
qede_config_debug(qede_t *qede)
458 {
459
460 struct ecore_dev *edev = &qede->edev;
461 u32 dp_level = 0;
462 u8 dp_module = 0;
463
464 dp_level = qede->ecore_debug_level;
465 dp_module = qede->ecore_debug_module;
466 ecore_init_dp(edev, dp_module, dp_level, NULL);
467 }
468
469
470
471 static int
qede_set_operating_params(qede_t *qede)
473 {
474 int status = 0;
475 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
476
	/* Get qede.conf parameters from user */
478 status = qede_get_config_params(qede);
479 if (status != DDI_SUCCESS) {
480 return (DDI_FAILURE);
481 }
482 /* config debug level */
483 qede_config_debug(qede);
484
485
486 intr_ctx->intr_vect_to_request =
487 qede->num_fp + qede->num_hwfns;
488 intr_ctx->intr_fp_vector_count = qede->num_fp - qede->num_hwfns;
489
490 /* set max number of Unicast list */
491 qede->ucst_total = QEDE_MAX_UCST_CNT;
492 qede->ucst_avail = QEDE_MAX_UCST_CNT;
493 bzero(&qede->ucst_mac[0], sizeof (qede_mac_addr_t) * qede->ucst_total);
494 qede->params.multi_promisc_fl = B_FALSE;
495 qede->params.promisc_fl = B_FALSE;
496 qede->mc_cnt = 0;
497 qede->rx_low_buffer_threshold = RX_LOW_BUFFER_THRESHOLD;
498
499 return (status);
500 }
501
502 /* Resume the interface */
503 static int
qede_resume(qede_t *qede)
505 {
506 mutex_enter(&qede->drv_lock);
507 cmn_err(CE_NOTE, "%s:%d Enter\n", __func__, qede->instance);
508 qede->qede_state = QEDE_STATE_ATTACHED;
509 mutex_exit(&qede->drv_lock);
510 return (DDI_FAILURE);
511 }
512
513 /*
514 * Write dword to doorbell from tx_path
515 * Avoid use of qede_t * pointer
516 */
517 #pragma inline(qede_bar2_write32_tx_doorbell)
518 void
qede_bar2_write32_tx_doorbell(qede_tx_ring_t *tx_ring, u32 val)
520 {
521 u64 addr = (u64)tx_ring->doorbell_addr;
522 ddi_put32(tx_ring->doorbell_handle, (u32 *)addr, val);
523 }
524
525 static void
qede_unconfig_pci(qede_t *qede)
527 {
528 if (qede->doorbell_handle != NULL) {
529 ddi_regs_map_free(&(qede->doorbell_handle));
530 qede->doorbell_handle = NULL;
531 }
532
533 if (qede->regs_handle != NULL) {
534 ddi_regs_map_free(&qede->regs_handle);
535 qede->regs_handle = NULL;
536 }
537 if (qede->pci_cfg_handle != NULL) {
538 pci_config_teardown(&qede->pci_cfg_handle);
539 qede->pci_cfg_handle = NULL;
540 }
541 }
542
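/*
 * Set up PCI config space access and map BAR0 (device registers) and
 * BAR2 (doorbells) for this instance.
 */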
543 static int
qede_config_pci(qede_t *qede)
545 {
546 int ret;
547
548 ret = pci_config_setup(qede->dip, &qede->pci_cfg_handle);
549 if (ret != DDI_SUCCESS) {
550 cmn_err(CE_NOTE, "%s:%d Failed to get PCI config handle\n",
551 __func__, qede->instance);
552 return (DDI_FAILURE);
553 }
554
555 /* get register size */
556 ret = ddi_dev_regsize(qede->dip, 1, &qede->regview_size);
557 if (ret != DDI_SUCCESS) {
558 cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0",
559 __func__, qede->instance);
560 goto err_exit;
561 }
562
563 /* get doorbell size */
564 ret = ddi_dev_regsize(qede->dip, 3, &qede->doorbell_size);
565 if (ret != DDI_SUCCESS) {
566 cmn_err(CE_WARN, "%s%d: failed to read doorbell size for bar2",
567 __func__, qede->instance);
568 goto err_exit;
569 }
570
571 /* map register space */
572 ret = ddi_regs_map_setup(
573 /* Pointer to the device's dev_info structure. */
574 qede->dip,
575 /*
576 * Index number to the register address space set.
577 * A value of 0 indicates PCI configuration space,
578 * while a value of 1 indicates the real start of
579 * device register sets.
580 */
581 1,
582 /*
583 * A platform-dependent value that, when added to
584 * an offset that is less than or equal to the len
585 * parameter (see below), is used for the dev_addr
586 * argument to the ddi_get, ddi_mem_get, and
587 * ddi_io_get/put routines.
588 */
589 &qede->regview,
590 /*
591 * Offset into the register address space.
592 */
593 0,
594 /* Length to be mapped. */
595 qede->regview_size,
596 /*
597 * Pointer to a device access attribute structure
598 * of this mapping.
599 */
600 &qede_regs_acc_attr,
601 /* Pointer to a data access handle. */
602 &qede->regs_handle);
603
604 if (ret != DDI_SUCCESS) {
605 cmn_err(CE_WARN, "!qede(%d): failed to map registers, err %d",
606 qede->instance, ret);
607 goto err_exit;
608 }
609
610 qede->pci_bar0_base = (unsigned long)qede->regview;
611
612 /* map doorbell space */
613 ret = ddi_regs_map_setup(qede->dip,
614 2,
615 &qede->doorbell,
616 0,
617 qede->doorbell_size,
618 &qede_regs_acc_attr,
619 &qede->doorbell_handle);
620
621 if (ret != DDI_SUCCESS) {
622 cmn_err(CE_WARN, "qede%d: failed to map doorbell, err %d",
623 qede->instance, ret);
624 goto err_exit;
625 }
626
627 qede->pci_bar2_base = (unsigned long)qede->doorbell;
628
629 return (ret);
630 err_exit:
631 qede_unconfig_pci(qede);
632 return (DDI_FAILURE);
633 }
634
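/*
 * Slowpath interrupt handler for a hardware function; hands the work
 * off to ecore_int_sp_dpc().
 */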
635 static uint_t
qede_sp_handler(caddr_t arg1, caddr_t arg2)
637 {
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)arg1;
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	qede_vector_info_t *vect_info = (qede_vector_info_t *)arg2;
	struct ecore_dev *edev;
	qede_t *qede;

	if ((arg1 == NULL) || (arg2 == NULL)) {
		cmn_err(CE_WARN, "qede_sp_handler: invalid parameters");
		/*
		 * MSIX intr should always
		 * return DDI_INTR_CLAIMED
		 */
		return (DDI_INTR_CLAIMED);
	}

	/* Only dereference the handler arguments after the NULL check */
	edev = p_hwfn->p_dev;
	qede = (qede_t *)edev;

655 vect_info->in_isr = B_TRUE;
656
657 atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
658 qede->intrSbCnt[vect_info->vect_index]++;
659
660
661 ecore_int_sp_dpc((osal_int_ptr_t)p_hwfn);
662
663 vect_info->in_isr = B_FALSE;
664
665 return (DDI_INTR_CLAIMED);
666 }
667
668 void
qede_enable_hw_intr(qede_fastpath_t *fp)
670 {
671 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
672 ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
673 }
674
675 void
qede_disable_hw_intr(qede_fastpath_t *fp)
677 {
678 ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
679 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
680 }
681
682
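/*
 * Fastpath interrupt handler: disables the status block, polls rx/tx
 * completions and passes any received packet chain up to MAC.
 */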
683 static uint_t
qede_fp_handler(caddr_t arg1, caddr_t arg2)
685 {
686 /* LINTED E_BAD_PTR_CAST_ALIGN */
687 qede_vector_info_t *vect_info = (qede_vector_info_t *)arg1;
688 /* LINTED E_BAD_PTR_CAST_ALIGN */
689 qede_t *qede = (qede_t *)arg2;
690 qede_fastpath_t *fp;
691 qede_rx_ring_t *rx_ring;
692 mblk_t *mp;
693 int work_done = 0;
694
695 if ((vect_info == NULL) || (vect_info->fp == NULL)) {
696 cmn_err(CE_WARN, "qede_fp_handler: invalid parameters");
697 return (DDI_INTR_UNCLAIMED);
698 }
699
700 fp = (qede_fastpath_t *)vect_info->fp;
701 rx_ring = fp->rx_ring;
702
703 mutex_enter(&fp->fp_lock);
704
705 atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
706 qede->intrSbCnt[vect_info->vect_index]++;
707
708 mutex_enter(&fp->qede->drv_lock);
709 qede_disable_hw_intr(fp);
710 mutex_exit(&fp->qede->drv_lock);
711
712 mp = qede_process_fastpath(fp, QEDE_POLL_ALL,
713 QEDE_MAX_RX_PKTS_PER_INTR, &work_done);
714
715 if (mp)
716 #ifndef NO_CROSSBOW
717 {
718 mac_rx_ring(rx_ring->qede->mac_handle,
719 rx_ring->mac_ring_handle,
720 mp,
721 rx_ring->mr_gen_num);
722 }
723 #else
724 {
725 mac_rx(qede->mac_handle, NULL, mp);
726 }
727 #endif
728 else if (!mp && (work_done == 0)) {
729 qede->intrSbNoChangeCnt[vect_info->vect_index]++;
730 }
731
732
733 mutex_enter(&fp->qede->drv_lock);
734 /*
	 * The mac layer may have disabled interrupts
736 * in the context of the mac_rx_ring call
737 * above while readying for poll process.
738 * In this case we do not want to
739 * enable them here.
740 */
741 if (fp->disabled_by_poll == 0) {
742 qede_enable_hw_intr(fp);
743 }
744 mutex_exit(&fp->qede->drv_lock);
745
746 mutex_exit(&fp->fp_lock);
747
748 return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
749 }
750
751 static int
qede_disable_intr(qede_t *qede, uint32_t index)
753 {
754 int status;
755 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
756
757 status = ddi_intr_disable(intr_ctx->intr_hdl_array[index]);
758 if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_disable with %s"
760 " for index %d\n",
761 __func__, qede_get_ddi_fail(status), index);
762 return (status);
763 }
764 atomic_and_32(&intr_ctx->intr_state, ~(1 << index));
765
766 return (status);
767 }
768
769 static int
qede_enable_intr(qede_t *qede, int index)
771 {
772 int status = 0;
773
774 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
775
776 status = ddi_intr_enable(intr_ctx->intr_hdl_array[index]);
777
778 if (status != DDI_SUCCESS) {
779 cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_enable with %s"
780 " for index %d\n",
781 __func__, qede_get_ddi_fail(status), index);
782 return (status);
783 }
784
785 atomic_or_32(&intr_ctx->intr_state, (1 << index));
786
787 return (status);
788 }
789
790 static int
qede_disable_all_fastpath_intrs(qede_t *qede)
792 {
793 int i, status;
794
795 for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
796 status = qede_disable_intr(qede, i);
797 if (status != DDI_SUCCESS) {
798 return (status);
799 }
800 }
801 return (DDI_SUCCESS);
802 }
803
804 static int
qede_enable_all_fastpath_intrs(qede_t *qede)
806 {
807 int status = 0, i;
808
809 for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
810 status = qede_enable_intr(qede, i);
811 if (status != DDI_SUCCESS) {
812 return (status);
813 }
814 }
815 return (DDI_SUCCESS);
816 }
817
818 static int
qede_disable_slowpath_intrs(qede_t *qede)
820 {
821 int i, status;
822
823 for (i = 0; i < qede->num_hwfns; i++) {
824 status = qede_disable_intr(qede, i);
825 if (status != DDI_SUCCESS) {
826 return (status);
827 }
828 }
829 return (DDI_SUCCESS);
830 }
831
832 static int
qede_enable_slowpath_intrs(qede_t *qede)
834 {
835 int i, status;
836
837 for (i = 0; i < qede->num_hwfns; i++) {
838 status = qede_enable_intr(qede, i);
839 if (status != DDI_SUCCESS) {
840 return (status);
841 }
842 }
843 return (DDI_SUCCESS);
844 }
845
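/*
 * Hand the mapped BAR addresses to ecore and run ecore_hw_prepare().
 */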
846 static int
qede_prepare_edev(qede_t *qede)
848 {
849 struct ecore_dev *edev = &qede->edev;
850 struct ecore_hw_prepare_params p_params;
851
852 /*
853 * Setup the bar0 and bar2 base address
854 * in ecore_device
855 */
856 edev->regview = (void *)qede->regview;
857 edev->doorbells = (void *)qede->doorbell;
858
859 /* LINTED E_FUNC_RET_MAYBE_IGNORED2 */
860 strcpy(edev->name, qede->name);
861 ecore_init_struct(edev);
862
863 p_params.personality = ECORE_PCI_ETH;
864 p_params.drv_resc_alloc = 0;
865 p_params.chk_reg_fifo = 1;
866 p_params.initiate_pf_flr = 1;
867 //p_params->epoch = time(&epoch);
868 p_params.allow_mdump = 1;
869 p_params.b_relaxed_probe = 0;
870 return (ecore_hw_prepare(edev, &p_params));
871 }
872
873 static int
qede_config_edev(qede_t *qede)
875 {
876 int status, i;
877 struct ecore_dev *edev = &qede->edev;
878 struct ecore_pf_params *params;
879
880 for (i = 0; i < qede->num_hwfns; i++) {
881 struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
882 params = &p_hwfn->pf_params;
883 memset((void *)params, 0, sizeof (struct ecore_pf_params));
884 params->eth_pf_params.num_cons = 32;
885 }
886 status = ecore_resc_alloc(edev);
887 if (status != ECORE_SUCCESS) {
888 cmn_err(CE_NOTE, "%s: Could not allocate ecore resources\n",
889 __func__);
890 return (DDI_ENOMEM);
891 }
892 ecore_resc_setup(edev);
893 return (DDI_SUCCESS);
894 }
895
896 static void
qede_unconfig_intrs(qede_t *qede)
898 {
899 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
900 qede_vector_info_t *vect_info;
901 int i, status = 0;
902
903 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
904 vect_info = &intr_ctx->intr_vect_info[i];
905 if (intr_ctx->intr_vect_info[i].handler_added == B_TRUE) {
906 status = ddi_intr_remove_handler(
907 intr_ctx->intr_hdl_array[i]);
908 if (status != DDI_SUCCESS) {
909 cmn_err(CE_WARN, "qede:%s: Failed"
910 " ddi_intr_remove_handler with %s"
911 " for index %d\n",
912 __func__, qede_get_ddi_fail(
913 status), i);
914 }
915
916 (void) ddi_intr_free(intr_ctx->intr_hdl_array[i]);
917
918 vect_info->handler_added = B_FALSE;
919 intr_ctx->intr_hdl_array[i] = NULL;
920 }
921 }
922 }
923
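/*
 * Attach qede_sp_handler to the slowpath vectors and qede_fp_handler
 * to the fastpath vectors.
 */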
924 static int
qede_config_intrs(qede_t *qede)
926 {
927 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
928 qede_vector_info_t *vect_info;
929 struct ecore_dev *edev = &qede->edev;
930 int i, status = DDI_FAILURE;
931 ddi_intr_handler_t *handler;
932 void *arg1, *arg2;
933
934 /*
935 * Set up the interrupt handler argument
936 * for the slowpath
937 */
938 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
939 vect_info = &intr_ctx->intr_vect_info[i];
940 /* Store the table index */
941 vect_info->vect_index = i;
942 vect_info->qede = qede;
943 /*
944 * Store the interrupt handler's argument.
		 * This will be a pointer to ecore_dev->hwfns
946 * for slowpath, a pointer to the fastpath
947 * structure for fastpath.
948 */
949 if (i < qede->num_hwfns) {
950 vect_info->fp = (void *)&edev->hwfns[i];
951 handler = qede_sp_handler;
952 arg1 = (caddr_t)&qede->edev.hwfns[i];
953 arg2 = (caddr_t)vect_info;
954 } else {
955 /*
956 * loop index includes hwfns
			 * so they need to be subtracted
958 * for fp_array
959 */
960 vect_info->fp =
961 (void *)&qede->fp_array[i - qede->num_hwfns];
962 handler = qede_fp_handler;
963 arg1 = (caddr_t)vect_info;
964 arg2 = (caddr_t)qede;
965 }
966
967 status = ddi_intr_add_handler(
968 intr_ctx->intr_hdl_array[i],
969 handler,
970 arg1,
971 arg2);
972 if (status != DDI_SUCCESS) {
973 cmn_err(CE_WARN, "qede:%s: Failed "
974 " ddi_intr_add_handler with %s"
975 " for index %d\n",
976 __func__, qede_get_ddi_fail(
977 status), i);
978 qede_unconfig_intrs(qede);
979 return (DDI_FAILURE);
980 }
981 vect_info->handler_added = B_TRUE;
982 }
983
984 return (status);
985 }
986
987 static void
qede_free_intrs(qede_t *qede)
989 {
990 qede_intr_context_t *intr_ctx;
991 int i, status;
992
993 ASSERT(qede != NULL);
994 intr_ctx = &qede->intr_ctx;
995 ASSERT(intr_ctx != NULL);
996
997 if (intr_ctx->intr_hdl_array) {
998 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
999 if (intr_ctx->intr_hdl_array[i]) {
1000 status =
1001 ddi_intr_free(intr_ctx->intr_hdl_array[i]);
1002 if (status != DDI_SUCCESS) {
1003 cmn_err(CE_NOTE,
1004 "qede:%s: Failed ddi_intr_free"
1005 " with %s\n",
1006 __func__,
1007 qede_get_ddi_fail(status));
1008 }
1009 }
1010 }
		/* The handle array itself is freed below */
1012 }
1013
1014 if (intr_ctx->intr_hdl_array) {
1015 kmem_free(intr_ctx->intr_hdl_array,
1016 intr_ctx->intr_hdl_array_size);
1017 intr_ctx->intr_hdl_array = NULL;
1018 }
1019
1020 if (intr_ctx->intr_vect_info) {
1021 kmem_free(intr_ctx->intr_vect_info,
1022 intr_ctx->intr_vect_info_array_size);
1023 intr_ctx->intr_vect_info = NULL;
1024 }
1025 }
1026
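/*
 * Allocate MSI-X vectors, one per hwfn for the slowpath plus one per
 * fastpath, shrinking num_fp if fewer vectors are available.
 */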
1027 static int
qede_alloc_intrs(qede_t *qede)
1029 {
1030 int status, type_supported, num_supported;
1031 int actual, num_available, num_to_request;
1032 dev_info_t *dip;
1033 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
1034
1035 dip = qede->dip;
1036
1037 status = ddi_intr_get_supported_types(dip, &type_supported);
1038 if (status != DDI_SUCCESS) {
1039 cmn_err(CE_WARN,
1040 "qede:%s: Failed ddi_intr_get_supported_types with %s\n",
1041 __func__, qede_get_ddi_fail(status));
1042 return (status);
1043 }
1044 intr_ctx->intr_types_available = type_supported;
1045
1046 if (type_supported & DDI_INTR_TYPE_MSIX) {
1047 intr_ctx->intr_type_in_use = DDI_INTR_TYPE_MSIX;
1048
1049 /*
1050 * get the total number of vectors
1051 * supported by the device
1052 */
1053 status = ddi_intr_get_nintrs(qede->dip,
1054 DDI_INTR_TYPE_MSIX, &num_supported);
1055 if (status != DDI_SUCCESS) {
1056 cmn_err(CE_WARN,
1057 "qede:%s: Failed ddi_intr_get_nintrs with %s\n",
1058 __func__, qede_get_ddi_fail(status));
1059 return (status);
1060 }
1061 intr_ctx->intr_vect_supported = num_supported;
1062
1063 /*
1064 * get the total number of vectors
1065 * available for this instance
1066 */
1067 status = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX,
1068 &num_available);
1069 if (status != DDI_SUCCESS) {
1070 cmn_err(CE_WARN,
1071 "qede:%s: Failed ddi_intr_get_navail with %s\n",
1072 __func__, qede_get_ddi_fail(status));
1073 return (status);
1074 }
1075
1076 if ((num_available < intr_ctx->intr_vect_to_request) &&
1077 (num_available >= 2)) {
1078 qede->num_fp = num_available - qede->num_hwfns;
1079 cmn_err(CE_NOTE,
1080 "qede:%s: allocated %d interrupts"
1081 " requested was %d\n",
1082 __func__, num_available,
1083 intr_ctx->intr_vect_to_request);
1084 intr_ctx->intr_vect_to_request = num_available;
		} else if (num_available < 2) {
			cmn_err(CE_WARN,
			    "qede:%s: only %d MSI-X vectors available, "
			    "need at least 2\n",
			    __func__, num_available);
1089 return (DDI_FAILURE);
1090 }
1091
1092 intr_ctx->intr_vect_available = num_available;
1093 num_to_request = intr_ctx->intr_vect_to_request;
1094 intr_ctx->intr_hdl_array_size = num_to_request *
1095 sizeof (ddi_intr_handle_t);
1096 intr_ctx->intr_vect_info_array_size = num_to_request *
1097 sizeof (qede_vector_info_t);
1098
1099 /* Allocate an array big enough for maximum supported */
1100 intr_ctx->intr_hdl_array = kmem_zalloc(
1101 intr_ctx->intr_hdl_array_size, KM_SLEEP);
1102
1103 intr_ctx->intr_vect_info = kmem_zalloc(
1104 intr_ctx->intr_vect_info_array_size, KM_SLEEP);
1105
1106 /*
1107 * Use strict allocation. It will fail if we do not get
1108 * exactly what we want. Later we can shift through with
1109 * power of two like this:
1110 * for (i = intr_ctx->intr_requested; i > 0; i >>= 1)
1111 * (Though we would need to account for the slowpath vector)
1112 */
1113 status = ddi_intr_alloc(qede->dip,
1114 intr_ctx->intr_hdl_array,
1115 DDI_INTR_TYPE_MSIX,
1116 0,
1117 num_to_request,
1118 &actual,
1119 DDI_INTR_ALLOC_STRICT);
1120 if (status != DDI_SUCCESS) {
1121 cmn_err(CE_WARN,
1122 "qede:%s: Failed to allocate"
1123 " %d interrupts with %s\n",
1124 __func__, num_to_request,
1125 qede_get_ddi_fail(status));
1126 cmn_err(CE_WARN,
1127 "qede:%s: Only %d interrupts available.\n",
1128 __func__, actual);
1129 goto err_exit;
1130 }
1131 intr_ctx->intr_vect_allocated = num_to_request;
1132
1133 status = ddi_intr_get_pri(intr_ctx->intr_hdl_array[0],
1134 &intr_ctx->intr_pri);
1135 if (status != DDI_SUCCESS) {
1136 cmn_err(CE_WARN,
1137 "qede:%s: Failed ddi_intr_get_pri with %s\n",
1138 __func__, qede_get_ddi_fail(status));
1139 goto err_exit;
1140 }
1141
1142 status = ddi_intr_get_cap(intr_ctx->intr_hdl_array[0],
1143 &intr_ctx->intr_cap);
1144 if (status != DDI_SUCCESS) {
1145 cmn_err(CE_WARN,
1146 "qede:%s: Failed ddi_intr_get_cap with %s\n",
1147 __func__, qede_get_ddi_fail(status));
1148 goto err_exit;
1149 }
1150
1151 } else {
1152 /* For now we only support type MSIX */
1153 cmn_err(CE_WARN,
1154 "qede:%s: Failed to allocate intr_ctx->intr_hdl_array\n",
1155 __func__);
1156 return (DDI_FAILURE);
1157 }
1158
1159 intr_ctx->intr_mode = ECORE_INT_MODE_MSIX;
1160 return (status);
1161 err_exit:
1162 qede_free_intrs(qede);
1163 return (status);
1164 }
1165
1166 static void
1167 /* LINTED E_FUNC_ARG_UNUSED */
qede_unconfig_fm(qede_t *qede)
1169 {
1170 }
1171
1172 /* LINTED E_FUNC_ARG_UNUSED */
1173 static int
qede_fm_err_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
1176 {
1177 pci_ereport_post(dip, err, NULL);
1178 return (err->fme_status);
1179 }
1180
1181
1182 static int
qede_config_fm(qede_t *qede)
1184 {
1185 ddi_iblock_cookie_t iblk;
1186
1187 cmn_err(CE_NOTE, "Entered qede_config_fm\n");
1188 qede_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1189 qede_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1190 qede_buf_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1191 qede_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1192 qede_gen_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1193 qede_tx_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1194 qede_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1195 qede_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1196 qede_dma_attr_rxbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1197 qede_dma_attr_cmddesc.dma_attr_flags = DDI_DMA_FLAGERR;
1198 qede_gen_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1199 qede_buf2k_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1200
1201 ddi_fm_init(qede->dip, &qede->fm_cap, &iblk);
1202
1203 if (DDI_FM_EREPORT_CAP(qede->fm_cap) ||
1204 DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1205 pci_ereport_setup(qede->dip);
1206 }
1207
1208 if (DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1209 ddi_fm_handler_register(qede->dip,
1210 qede_fm_err_cb, (void *)qede);
1211 }
1212 return (DDI_SUCCESS);
1213
1214 }
1215
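/*
 * Allocate, zero and bind a physically contiguous (single cookie) DMA
 * area; used for descriptor rings and other shared structures.
 */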
1216 int
qede_dma_mem_alloc(qede_t *qede,
    int size, uint_t dma_flags, caddr_t *address, ddi_dma_cookie_t *cookie,
    ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep,
    ddi_dma_attr_t *dma_attr, ddi_device_acc_attr_t *dev_acc_attr)
1221 {
1222 int err;
1223 uint32_t ncookies;
1224 size_t ring_len;
1225
1226 *dma_handle = NULL;
1227
1228 if (size <= 0) {
1229 return (DDI_ENOMEM);
1230 }
1231
1232 err = ddi_dma_alloc_handle(qede->dip,
1233 dma_attr,
1234 DDI_DMA_DONTWAIT, NULL, dma_handle);
1235 if (err != DDI_SUCCESS) {
1236 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1237 "ddi_dma_alloc_handle FAILED: %d", qede->instance, err);
1238 *dma_handle = NULL;
1239 return (DDI_ENOMEM);
1240 }
1241
1242 err = ddi_dma_mem_alloc(*dma_handle,
1243 size, dev_acc_attr,
1244 dma_flags,
1245 DDI_DMA_DONTWAIT, NULL, address, &ring_len,
1246 handlep);
1247 if (err != DDI_SUCCESS) {
1248 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1249 "ddi_dma_mem_alloc FAILED: %d, request size: %d",
1250 qede->instance, err, size);
1251 ddi_dma_free_handle(dma_handle);
1252 *dma_handle = NULL;
1253 *handlep = NULL;
1254 return (DDI_ENOMEM);
1255 }
1256
1257 if (ring_len < size) {
1258 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1259 "could not allocate required: %d, request size: %d",
		    qede->instance, (int)ring_len, size);
1261 ddi_dma_mem_free(handlep);
1262 ddi_dma_free_handle(dma_handle);
1263 *dma_handle = NULL;
1264 *handlep = NULL;
1265 return (DDI_FAILURE);
1266 }
1267
1268 (void) memset(*address, 0, size);
1269
1270 if (((err = ddi_dma_addr_bind_handle(*dma_handle,
1271 NULL, *address, ring_len,
1272 dma_flags,
1273 DDI_DMA_DONTWAIT, NULL,
1274 cookie, &ncookies)) != DDI_DMA_MAPPED) ||
1275 (ncookies != 1)) {
1276 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1277 "ddi_dma_addr_bind_handle Failed: %d",
1278 qede->instance, err);
1279 ddi_dma_mem_free(handlep);
1280 ddi_dma_free_handle(dma_handle);
1281 *dma_handle = NULL;
1282 *handlep = NULL;
1283 return (DDI_FAILURE);
1284 }
1285
1286 return (DDI_SUCCESS);
1287 }
1288
1289 void
qede_pci_free_consistent(ddi_dma_handle_t *dma_handle,
    ddi_acc_handle_t *acc_handle)
1292 {
1293 int err;
1294
1295 if (*dma_handle != NULL) {
1296 err = ddi_dma_unbind_handle(*dma_handle);
1297 if (err != DDI_SUCCESS) {
1298 cmn_err(CE_WARN, "!pci_free_consistent: "
1299 "Error unbinding memory, err %d", err);
1300 return;
1301 }
1302 } else {
1303 goto exit;
1304 }
1305 ddi_dma_mem_free(acc_handle);
1306 ddi_dma_free_handle(dma_handle);
1307 exit:
1308 *dma_handle = NULL;
1309 *acc_handle = NULL;
1310 }
1311
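/*
 * Stop every vport that was previously started, one per hwfn.
 */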
1312 static int
qede_vport_stop(qede_t *qede)
1314 {
1315 struct ecore_dev *edev = &qede->edev;
1316 struct ecore_hwfn *p_hwfn;
1317 int i, status = ECORE_BUSY;
1318
1319 for (i = 0; i < edev->num_hwfns; i++) {
1320 p_hwfn = &edev->hwfns[i];
1321
1322 if (qede->vport_state[i] !=
1323 QEDE_VPORT_STARTED) {
1324 qede_info(qede, "vport %d not started", i);
1325 continue;
1326 }
1327
1328 status = ecore_sp_vport_stop(p_hwfn,
1329 p_hwfn->hw_info.opaque_fid,
1330 i); /* vport needs fix */
1331 if (status != ECORE_SUCCESS) {
1332 cmn_err(CE_WARN, "!qede_vport_stop: "
1333 "FAILED for hwfn%d ", i);
1334 return (DDI_FAILURE);
1335 }
1336 cmn_err(CE_WARN, "!qede_vport_stop: "
1337 "SUCCESS for hwfn%d ", i);
1338
1339 qede->vport_state[i] =
1340 QEDE_VPORT_STOPPED;
1341 }
1342
1343 return (status);
1344 }
1345
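/*
 * Build the RSS capabilities, hash key and indirection table for a hwfn.
 * Returns 1 if RSS should be enabled (more than one fastpath), else 0.
 */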
1346 static uint8_t
qede_get_active_rss_params(qede_t *qede, u8 hwfn_id)
1348 {
1349 struct ecore_rss_params rss_params;
1350 qede_fastpath_t *fp;
1351 int i;
1352 const uint64_t hash_key[] =
1353 {
1354 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
1355 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1356 0x255b0ec26d5a56daULL
1357 };
1358 uint8_t enable_rss = 0;
1359
1360 bzero(&rss_params, sizeof (rss_params));
1361 if (qede->num_fp > 1) {
1362 qede_info(qede, "Configuring RSS parameters");
1363 enable_rss = 1;
1364 } else {
1365 qede_info(qede, "RSS configuration not needed");
1366 enable_rss = 0;
1367 goto exit;
1368 }
1369
1370 rss_params.update_rss_config = 1;
1371 rss_params.rss_enable = 1;
1372 rss_params.update_rss_capabilities = 1;
1373 rss_params.update_rss_ind_table = 1;
1374 rss_params.update_rss_key = 1;
1375
1376 rss_params.rss_caps = ECORE_RSS_IPV4 |
1377 ECORE_RSS_IPV6 |
1378 ECORE_RSS_IPV4_TCP |
1379 ECORE_RSS_IPV6_TCP |
1380 ECORE_RSS_IPV4_UDP |
1381 ECORE_RSS_IPV6_UDP;
1382
1383 rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1384
1385 bcopy(&hash_key[0], &rss_params.rss_key[0],
1386 sizeof (rss_params.rss_key));
1387
1388 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1389 fp = &qede->fp_array[i % qede->num_fp];
1390 rss_params.rss_ind_table[i] = (void *)(fp->rx_ring->p_cid);
1391 }
1392 exit:
1393 bcopy(&rss_params, &qede->rss_params[hwfn_id], sizeof (rss_params));
1394 return (enable_rss);
1395 }
1396
1397 static int
qede_vport_update(qede_t *qede,
    enum qede_vport_state state)
1400 {
1401 struct ecore_dev *edev = &qede->edev;
1402 struct ecore_hwfn *p_hwfn;
1403 struct ecore_sp_vport_update_params *vport_params;
1404 struct ecore_sge_tpa_params tpa_params;
1405 int status = DDI_SUCCESS;
1406 bool new_state;
1407 uint8_t i;
1408
1409 cmn_err(CE_NOTE, "qede_vport_update: "
1410 "Enter, state = %s%s%s%s%s",
1411 state == QEDE_VPORT_STARTED ? "QEDE_VPORT_STARTED" : "",
1412 state == QEDE_VPORT_ON ? "QEDE_VPORT_ON" : "",
1413 state == QEDE_VPORT_OFF ? "QEDE_VPORT_OFF" : "",
1414 state == QEDE_VPORT_STOPPED ? "QEDE_VPORT_STOPPED" : "",
1415 state == QEDE_VPORT_UNKNOWN ? "" : "");
1416
1417 /*
1418 * Update only does on and off.
1419 * For now we combine TX and RX
1420 * together. Later we can split them
1421 * and set other params as well.
1422 */
1423 if (state == QEDE_VPORT_ON) {
1424 new_state = true;
1425 } else if (state == QEDE_VPORT_OFF) {
1426 new_state = false;
1427 } else {
1428 cmn_err(CE_WARN, "qede_vport_update: "
1429 "invalid, state = %d", state);
1430 return (DDI_EINVAL);
1431 }
1432
1433 for (i = 0; i < edev->num_hwfns; i++) {
1434 p_hwfn = &edev->hwfns[i];
1435 vport_params = &qede->vport_params[i];
1436
1437 vport_params->opaque_fid =
1438 p_hwfn->hw_info.opaque_fid;
1439 vport_params->vport_id =
1440 i;
1441
1442 vport_params->update_vport_active_rx_flg =
1443 1;
1444 if (new_state)
1445 vport_params->vport_active_rx_flg = 1;
1446 else
1447 vport_params->vport_active_rx_flg = 0;
1448
1449 vport_params->update_vport_active_tx_flg =
1450 1;
1451 if (new_state)
1452 vport_params->vport_active_tx_flg = 1;
1453 else
1454 vport_params->vport_active_tx_flg = 0;
1455
1456 vport_params->update_inner_vlan_removal_flg =
1457 0;
1458 vport_params->inner_vlan_removal_flg =
1459 0;
1460 vport_params->update_default_vlan_enable_flg =
1461 0;
1462 vport_params->default_vlan_enable_flg =
1463 0;
1464 vport_params->update_default_vlan_flg =
1465 1;
1466 vport_params->default_vlan =
1467 0;
1468 vport_params->update_tx_switching_flg =
1469 0;
1470 vport_params->tx_switching_flg =
1471 0;
1472 vport_params->update_approx_mcast_flg =
1473 0;
1474 vport_params->update_anti_spoofing_en_flg =
1475 0;
1476 vport_params->anti_spoofing_en = 0;
1477 vport_params->update_accept_any_vlan_flg =
1478 1;
1479 vport_params->accept_any_vlan = 1;
1480
1481 vport_params->accept_flags.update_rx_mode_config = 1;
1482 vport_params->accept_flags.update_tx_mode_config = 1;
1483 vport_params->accept_flags.rx_accept_filter =
1484 ECORE_ACCEPT_BCAST |
1485 ECORE_ACCEPT_UCAST_UNMATCHED |
1486 ECORE_ACCEPT_MCAST_UNMATCHED;
1487 vport_params->accept_flags.tx_accept_filter =
1488 ECORE_ACCEPT_BCAST |
1489 ECORE_ACCEPT_UCAST_UNMATCHED |
1490 ECORE_ACCEPT_MCAST_UNMATCHED;
1491
1492 vport_params->sge_tpa_params = NULL;
1493
1494 if (qede->lro_enable && new_state) {
1495 qede_print("!%s(%d): enabling LRO ",
1496 __func__, qede->instance);
1497
1498 memset(&tpa_params, 0,
1499 sizeof (struct ecore_sge_tpa_params));
1500 tpa_params.max_buffers_per_cqe = 5;
1501 tpa_params.update_tpa_en_flg = 1;
1502 tpa_params.tpa_ipv4_en_flg = 1;
1503 tpa_params.tpa_ipv6_en_flg = 1;
1504 tpa_params.tpa_ipv4_tunn_en_flg = 0;
1505 tpa_params.tpa_ipv6_tunn_en_flg = 0;
1506 tpa_params.update_tpa_param_flg = 1;
1507 tpa_params.tpa_pkt_split_flg = 0;
1508 tpa_params.tpa_hdr_data_split_flg = 0;
1509 tpa_params.tpa_gro_consistent_flg = 0;
1510 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
1511 tpa_params.tpa_max_size = 65535;
1512 tpa_params.tpa_min_size_to_start = qede->mtu/2;
1513 tpa_params.tpa_min_size_to_cont = qede->mtu/2;
1514 vport_params->sge_tpa_params = &tpa_params;
1515 }
1516
1517 /*
1518 * Get the rss_params to be configured
1519 */
1520 if (qede_get_active_rss_params(qede, i /* hwfn id */)) {
1521 vport_params->rss_params = &qede->rss_params[i];
1522 } else {
1523 vport_params->rss_params = NULL;
1524 }
1525
1526 status = ecore_sp_vport_update(p_hwfn,
1527 vport_params,
1528 ECORE_SPQ_MODE_EBLOCK,
1529 NULL);
1530
1531 if (status != ECORE_SUCCESS) {
1532 cmn_err(CE_WARN, "ecore_sp_vport_update: "
1533 "FAILED for hwfn%d "
1534 " with ", i);
1535 return (DDI_FAILURE);
1536 }
1537 cmn_err(CE_NOTE, "!ecore_sp_vport_update: "
1538 "SUCCESS for hwfn%d ", i);
1539 }
1540 return (DDI_SUCCESS);
1541 }
1542
1543
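/*
 * Start a vport on each hwfn that is not already started and enable
 * its fastpath doorbells.
 */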
1544 static int
qede_vport_start(qede_t *qede)
1546 {
1547 struct ecore_dev *edev = &qede->edev;
1548 struct ecore_hwfn *p_hwfn;
1549 struct ecore_sp_vport_start_params params;
1550 uint8_t i;
1551 int status = ECORE_BUSY;
1552
1553 for (i = 0; i < edev->num_hwfns; i++) {
1554 p_hwfn = &edev->hwfns[i];
1555 if ((qede->vport_state[i] !=
1556 QEDE_VPORT_UNKNOWN) &&
1557 (qede->vport_state[i] !=
1558 QEDE_VPORT_STOPPED)) {
1559 continue;
1560 }
1561
1562 params.tpa_mode = ECORE_TPA_MODE_NONE;
1563 params.remove_inner_vlan = 0;
1564 params.tx_switching = 0;
1565 params.handle_ptp_pkts = 0;
1566 params.only_untagged = 0;
1567 params.drop_ttl0 = 1;
1568 params.max_buffers_per_cqe = 16;
1569 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
1570 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1571 params.vport_id = i;
1572 params.mtu = qede->mtu;
1573 status = ecore_sp_vport_start(p_hwfn, ¶ms);
1574 if (status != ECORE_SUCCESS) {
1575 cmn_err(CE_WARN, "qede_vport_start: "
1576 "FAILED for hwfn%d", i);
1577 return (DDI_FAILURE);
1578 }
1579 cmn_err(CE_NOTE, "!ecore_sp_vport_start: "
1580 "SUCCESS for hwfn%d ", i);
1581
1582 ecore_hw_start_fastpath(p_hwfn);
1583 qede->vport_state[i] = QEDE_VPORT_STARTED;
1584 }
1585 ecore_reset_vport_stats(edev);
1586 return (status);
1587 }
1588
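/*
 * Publish the current rx BD and CQE producer indices to the hardware.
 */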
1589 void
qede_update_rx_q_producer(qede_rx_ring_t *rx_ring)
1591 {
1592 u16 bd_prod = ecore_chain_get_prod_idx(&rx_ring->rx_bd_ring);
1593 u16 cqe_prod = ecore_chain_get_prod_idx(&rx_ring->rx_cqe_ring);
1594 /* LINTED E_FUNC_SET_NOT_USED */
1595 struct eth_rx_prod_data rx_prod_cmd = { 0 };
1596
1597
1598 rx_prod_cmd.bd_prod = HOST_TO_LE_32(bd_prod);
1599 rx_prod_cmd.cqe_prod = HOST_TO_LE_32(cqe_prod);
1600 UPDATE_RX_PROD(rx_ring, rx_prod_cmd);
1601 }
1602
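/*
 * Quiesce the datapath: switch the vports off, stop every tx and rx
 * queue, then stop the vports and the fastpath engine.
 */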
1603 static int
qede_fastpath_stop_queues(qede_t *qede)
1605 {
1606 int i, j;
1607 int status = DDI_FAILURE;
1608 struct ecore_dev *edev;
1609 struct ecore_hwfn *p_hwfn;
1610 struct ecore_queue_cid *p_tx_cid, *p_rx_cid;
1611
1612 qede_fastpath_t *fp;
1613 qede_rx_ring_t *rx_ring;
1614 qede_tx_ring_t *tx_ring;
1615
1616 ASSERT(qede != NULL);
1617 /* ASSERT(qede->edev != NULL); */
1618
1619 edev = &qede->edev;
1620
1621 status = qede_vport_update(qede, QEDE_VPORT_OFF);
1622 if (status != DDI_SUCCESS) {
1623 cmn_err(CE_WARN, "FAILED to "
1624 "update vports");
1625 return (DDI_FAILURE);
1626 }
1627
1628 for (i = 0; i < qede->num_fp; i++) {
1629 fp = &qede->fp_array[i];
1630 rx_ring = fp->rx_ring;
1631 p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1632 for (j = 0; j < qede->num_tc; j++) {
1633 tx_ring = fp->tx_ring[j];
1634 if (tx_ring->queue_started == B_TRUE) {
1635 cmn_err(CE_WARN, "Stopping tx queue "
1636 "%d:%d. ", i, j);
1637 p_tx_cid = tx_ring->p_cid;
1638 status = ecore_eth_tx_queue_stop(p_hwfn,
1639 (void *)p_tx_cid);
1640 if (status != ECORE_SUCCESS) {
1641 cmn_err(CE_WARN, "FAILED to "
1642 "stop tx queue %d:%d", i, j);
1643 return (DDI_FAILURE);
1644 }
1645 tx_ring->queue_started = B_FALSE;
1646 cmn_err(CE_NOTE, "tx_ring %d:%d stopped\n", i,
1647 j);
1648 }
1649 }
1650
1651 if (rx_ring->queue_started == B_TRUE) {
1652 cmn_err(CE_WARN, "Stopping rx queue "
1653 "%d. ", i);
1654 p_rx_cid = rx_ring->p_cid;
1655 status = ecore_eth_rx_queue_stop(p_hwfn,
1656 (void *)p_rx_cid, B_TRUE, B_FALSE);
1657 if (status != ECORE_SUCCESS) {
1658 cmn_err(CE_WARN, "FAILED to "
1659 "stop rx queue %d "
1660 "with ecore status %s",
1661 i, qede_get_ecore_fail(status));
1662 return (DDI_FAILURE);
1663 }
1664 rx_ring->queue_started = B_FALSE;
1665 cmn_err(CE_NOTE, "rx_ring%d stopped\n", i);
1666 }
1667 }
1668
1669 status = qede_vport_stop(qede);
1670 if (status != DDI_SUCCESS) {
1671 cmn_err(CE_WARN, "qede_vport_stop "
1672 "FAILED to stop vports");
1673 return (DDI_FAILURE);
1674 }
1675
1676 ecore_hw_stop_fastpath(edev);
1677
1678 return (DDI_SUCCESS);
1679 }
1680
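/*
 * Bring up the datapath: start the vports, start every rx and tx queue,
 * then switch the vports on.
 */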
1681 static int
qede_fastpath_start_queues(qede_t *qede)
1683 {
1684 int i, j;
1685 int status = DDI_FAILURE;
1686 struct ecore_dev *edev;
1687 struct ecore_hwfn *p_hwfn;
1688 struct ecore_queue_start_common_params params;
1689 struct ecore_txq_start_ret_params tx_ret_params;
1690 struct ecore_rxq_start_ret_params rx_ret_params;
1691 qede_fastpath_t *fp;
1692 qede_rx_ring_t *rx_ring;
1693 qede_tx_ring_t *tx_ring;
1694 dma_addr_t p_phys_table;
1695 u16 page_cnt;
1696
1697 ASSERT(qede != NULL);
1698 /* ASSERT(qede->edev != NULL); */
1699 edev = &qede->edev;
1700
1701 status = qede_vport_start(qede);
1702 if (status != DDI_SUCCESS) {
1703 cmn_err(CE_WARN, "Failed to "
1704 "start vports");
1705 return (DDI_FAILURE);
1706 }
1707
1708 for (i = 0; i < qede->num_fp; i++) {
1709 fp = &qede->fp_array[i];
1710 rx_ring = fp->rx_ring;
1711 p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1712
1713 params.vport_id = fp->vport_id;
1714 params.queue_id = fp->rx_queue_index;
1715 params.stats_id = fp->stats_id;
1716 params.p_sb = fp->sb_info;
1717 params.sb_idx = RX_PI;
1718 p_phys_table = ecore_chain_get_pbl_phys(&rx_ring->rx_cqe_ring);
1719 page_cnt = ecore_chain_get_page_cnt(&rx_ring->rx_cqe_ring);
1720
1721 status = ecore_eth_rx_queue_start(p_hwfn,
1722 p_hwfn->hw_info.opaque_fid,
1723 ¶ms,
1724 qede->rx_buf_size,
1725 rx_ring->rx_bd_ring.p_phys_addr,
1726 p_phys_table,
1727 page_cnt,
1728 &rx_ret_params);
1729
1730 rx_ring->hw_rxq_prod_addr = rx_ret_params.p_prod;
1731 rx_ring->p_cid = rx_ret_params.p_handle;
1732 if (status != DDI_SUCCESS) {
1733 cmn_err(CE_WARN, "ecore_sp_eth_rx_queue_start "
1734 "FAILED for rxq%d", i);
1735 return (DDI_FAILURE);
1736 }
1737 rx_ring->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
1738
1739 OSAL_MSLEEP(20);
1740 *rx_ring->hw_cons_ptr = 0;
1741
1742 qede_update_rx_q_producer(rx_ring);
1743 rx_ring->queue_started = B_TRUE;
1744 cmn_err(CE_NOTE, "rx_ring%d started\n", i);
1745
1746 for (j = 0; j < qede->num_tc; j++) {
1747 tx_ring = fp->tx_ring[j];
1748
1749 params.vport_id = fp->vport_id;
1750 params.queue_id = tx_ring->tx_queue_index;
1751 params.stats_id = fp->stats_id;
1752 params.p_sb = fp->sb_info;
1753 params.sb_idx = TX_PI(j);
1754
1755 p_phys_table = ecore_chain_get_pbl_phys(
1756 &tx_ring->tx_bd_ring);
1757 page_cnt = ecore_chain_get_page_cnt(
1758 &tx_ring->tx_bd_ring);
1759 status = ecore_eth_tx_queue_start(p_hwfn,
1760 p_hwfn->hw_info.opaque_fid,
1761 ¶ms,
1762 0,
1763 p_phys_table,
1764 page_cnt,
1765 &tx_ret_params);
1766 tx_ring->doorbell_addr = tx_ret_params.p_doorbell;
1767 tx_ring->p_cid = tx_ret_params.p_handle;
1768 if (status != DDI_SUCCESS) {
1769 cmn_err(CE_WARN, "ecore_sp_eth_tx_queue_start "
1770 "FAILED for txq%d:%d", i,j);
1771 return (DDI_FAILURE);
1772 }
1773 tx_ring->hw_cons_ptr =
1774 &fp->sb_info->sb_virt->pi_array[TX_PI(j)];
1775 /* LINTED E_CONSTANT_CONDITION */
1776 SET_FIELD(tx_ring->tx_db.data.params,
1777 ETH_DB_DATA_DEST, DB_DEST_XCM);
1778 /* LINTED E_CONSTANT_CONDITION */
1779 SET_FIELD(tx_ring->tx_db.data.params,
1780 ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1781 /* LINTED E_CONSTANT_CONDITION */
1782 SET_FIELD(tx_ring->tx_db.data.params,
1783 ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD);
1784 tx_ring->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1785 tx_ring->queue_started = B_TRUE;
1786 cmn_err(CE_NOTE, "tx_ring %d:%d started\n", i, j);
1787 }
1788 }
1789
1790 status = qede_vport_update(qede, QEDE_VPORT_ON);
1791 if (status != DDI_SUCCESS) {
1792 cmn_err(CE_WARN, "Failed to "
1793 "update vports");
1794 return (DDI_FAILURE);
1795 }
1796 return (status);
1797 }
1798
1799 static void
qede_free_mag_elem(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer,
    struct eth_rx_bd *bd)
1802 {
1803 int i;
1804
1805 if (bd != NULL) {
1806 bzero(bd, sizeof (*bd));
1807 }
1808
1809 if (rx_buffer->mp != NULL) {
1810 freemsg(rx_buffer->mp);
1811 rx_buffer->mp = NULL;
1812 }
1813 }
1814
1815 static void
qede_free_lro_rx_buffers(qede_rx_ring_t *rx_ring)
1817 {
1818 int i, j;
1819 qede_lro_info_t *lro_info;
1820
1821 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1822 lro_info = &rx_ring->lro_info[i];
1823 if (lro_info->agg_state == QEDE_AGG_STATE_NONE) {
1824 continue;
1825 }
1826 for (j = 0; j < QEDE_MAX_BD_PER_AGG; j++) {
1827 if (lro_info->rx_buffer[j] == NULL) {
1828 break;
1829 }
1830 qede_recycle_copied_rx_buffer(
1831 lro_info->rx_buffer[j]);
1832 lro_info->rx_buffer[j] = NULL;
1833 }
1834 lro_info->agg_state = QEDE_AGG_STATE_NONE;
1835 }
1836 }
1837
1838 static void
qede_free_rx_buffers_legacy(qede_t *qede, qede_rx_buf_area_t *rx_buf_area)
1840 {
1841 int i, j;
1842 u32 ref_cnt, bufs_per_page;
	qede_rx_buffer_t *rx_buffer, *first_rx_buf_in_page = NULL;
1844 qede_rx_ring_t *rx_ring = rx_buf_area->rx_ring;
1845 bool free_rx_buffer;
1846
1847 bufs_per_page = rx_buf_area->bufs_per_page;
1848
1849 rx_buffer = &rx_buf_area->rx_buf_pool[0];
1850
1851 if (rx_buf_area) {
1852 for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
1853 free_rx_buffer = true;
1854 for (j = 0; j < bufs_per_page; j++) {
1855 if (!j) {
1856 first_rx_buf_in_page = rx_buffer;
1857 }
1858 if (rx_buffer->ref_cnt != 0) {
1859 ref_cnt = atomic_dec_32_nv(
1860 &rx_buffer->ref_cnt);
1861 if (ref_cnt == 0) {
1862 /*
1863 * Buffer is now
1864 * completely free
1865 */
1866 if (rx_buffer->mp) {
1867 freemsg(rx_buffer->mp);
1868 rx_buffer->mp = NULL;
1869 }
1870 } else {
1871 /*
1872 * Since Buffer still
1873 * held up in Stack,
1874 * we cant free the whole page
1875 */
1876 free_rx_buffer = false;
1877 }
1878 }
1879 rx_buffer++;
1880 }
1881
1882 if (free_rx_buffer) {
1883 qede_pci_free_consistent(
1884 &first_rx_buf_in_page->dma_info.dma_handle,
1885 &first_rx_buf_in_page->dma_info.acc_handle);
1886 }
1887 }
1888
1889 /*
1890 * If no more buffers are with the stack
1891 * then free the buf pools
1892 */
1893 if (rx_buf_area->buf_upstream == 0) {
1894 mutex_destroy(&rx_buf_area->active_buf_list.lock);
1895 mutex_destroy(&rx_buf_area->passive_buf_list.lock);
1896
1897 kmem_free(rx_buf_area, sizeof (qede_rx_buf_area_t));
1898 rx_buf_area = NULL;
1899 if (atomic_cas_32(&qede->detach_unsafe, 2, 2)) {
1900 atomic_dec_32(&qede->detach_unsafe);
1901 }
1902 }
1903 }
1904 }
1905
1906
1907 static void
qede_free_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
1909 {
1910 qede_free_lro_rx_buffers(rx_ring);
1911 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1912 qede_free_rx_buffers_legacy(qede, rx_buf_area);
1913 }
1914
1915 static void
qede_free_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
1917 {
1918 qede_rx_ring_t *rx_ring;
1919
1920 ASSERT(qede != NULL);
1921 ASSERT(fp != NULL);
1922
1923
1924 rx_ring = fp->rx_ring;
1925 rx_ring->rx_buf_area->inactive = 1;
1926
1927 qede_free_rx_buffers(qede, rx_ring);
1928
1929
1930 if (rx_ring->rx_bd_ring.p_virt_addr) {
1931 ecore_chain_free(&qede->edev, &rx_ring->rx_bd_ring);
1932 rx_ring->rx_bd_ring.p_virt_addr = NULL;
1933 }
1934
1935 if (rx_ring->rx_cqe_ring.p_virt_addr) {
1936 ecore_chain_free(&qede->edev, &rx_ring->rx_cqe_ring);
1937 rx_ring->rx_cqe_ring.p_virt_addr = NULL;
1938 if (rx_ring->rx_cqe_ring.pbl_sp.p_virt_table) {
1939 rx_ring->rx_cqe_ring.pbl_sp.p_virt_table = NULL;
1940 }
1941 }
1942 rx_ring->hw_cons_ptr = NULL;
1943 rx_ring->hw_rxq_prod_addr = NULL;
1944 rx_ring->sw_rx_cons = 0;
1945 rx_ring->sw_rx_prod = 0;
1946
1947 }
1948
1949
1950 static int
qede_init_bd(qede_t *qede, qede_rx_ring_t *rx_ring)
1952 {
1953 struct eth_rx_bd *bd = NULL;
1954 int ret = DDI_SUCCESS;
1955 int i;
1956 qede_rx_buffer_t *rx_buffer;
1957 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1958 qede_rx_buf_list_t *active_buf_list = &rx_buf_area->active_buf_list;
1959
1960 for (i = 0; i < rx_ring->rx_buf_count; i++) {
1961 rx_buffer = &rx_buf_area->rx_buf_pool[i];
1962 active_buf_list->buf_list[i] = rx_buffer;
1963 active_buf_list->num_entries++;
1964 bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
1965 if (bd == NULL) {
1966 qede_print_err("!%s(%d): invalid NULL bd in "
1967 "rx_bd_ring", __func__, qede->instance);
1968 ret = DDI_FAILURE;
1969 goto err;
1970 }
1971
1972 bd->addr.lo = HOST_TO_LE_32(U64_LO(
1973 rx_buffer->dma_info.phys_addr));
1974 bd->addr.hi = HOST_TO_LE_32(U64_HI(
1975 rx_buffer->dma_info.phys_addr));
1976
1977 }
1978 active_buf_list->tail = 0;
1979 err:
1980 return (ret);
1981 }
1982
1983
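/*
 * Take the rx buffer at the head of the active list, bump its reference
 * count and attach an mblk to it if it does not already have one.
 */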
1984 qede_rx_buffer_t *
qede_get_from_active_list(qede_rx_ring_t *rx_ring,
    uint32_t *num_entries)
1987 {
1988 qede_rx_buffer_t *rx_buffer;
1989 qede_rx_buf_list_t *active_buf_list =
1990 &rx_ring->rx_buf_area->active_buf_list;
1991 u16 head = active_buf_list->head;
1992
1993 rx_buffer = active_buf_list->buf_list[head];
1994 active_buf_list->buf_list[head] = NULL;
1995 head = (head + 1) & RX_RING_MASK;
1996
1997 if (rx_buffer) {
1998 atomic_dec_32(&active_buf_list->num_entries);
1999 atomic_inc_32(&rx_ring->rx_buf_area->buf_upstream);
2000 atomic_inc_32(&rx_buffer->ref_cnt);
2001 rx_buffer->buf_state = RX_BUF_STATE_WITH_OS;
2002
2003 if (rx_buffer->mp == NULL) {
2004 rx_buffer->mp =
2005 desballoc(rx_buffer->dma_info.virt_addr,
2006 rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2007 }
2008 }
2009
2010 *num_entries = active_buf_list->num_entries;
2011 active_buf_list->head = head;
2012
2013 return (rx_buffer);
2014 }
2015
2016 qede_rx_buffer_t *
2017 qede_get_from_passive_list(qede_rx_ring_t *rx_ring)
2018 {
2019 qede_rx_buf_list_t *passive_buf_list =
2020 &rx_ring->rx_buf_area->passive_buf_list;
2021 qede_rx_buffer_t *rx_buffer;
2022 u32 head;
2023
2024 mutex_enter(&passive_buf_list->lock);
2025 head = passive_buf_list->head;
2026 if (passive_buf_list->buf_list[head] == NULL) {
2027 mutex_exit(&passive_buf_list->lock);
2028 return (NULL);
2029 }
2030
2031 rx_buffer = passive_buf_list->buf_list[head];
2032 passive_buf_list->buf_list[head] = NULL;
2033
2034 passive_buf_list->head = (passive_buf_list->head + 1) & RX_RING_MASK;
2035 mutex_exit(&passive_buf_list->lock);
2036
2037 atomic_dec_32(&passive_buf_list->num_entries);
2038
2039 return (rx_buffer);
2040 }
2041
2042 void
2043 qede_put_to_active_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2044 {
2045 qede_rx_buf_list_t *active_buf_list =
2046 &rx_ring->rx_buf_area->active_buf_list;
2047 u16 tail = active_buf_list->tail;
2048
2049 active_buf_list->buf_list[tail] = rx_buffer;
2050 tail = (tail + 1) & RX_RING_MASK;
2051
2052 active_buf_list->tail = tail;
2053 atomic_inc_32(&active_buf_list->num_entries);
2054 }
2055
2056 void
2057 qede_replenish_rx_buffers(qede_rx_ring_t *rx_ring)
2058 {
2059 qede_rx_buffer_t *rx_buffer;
2060 int count = 0;
2061 struct eth_rx_bd *bd;
2062
2063 /*
2064 * Only replenish when we have at least
2065 * 1/4th of the ring to do. We don't want
2066 * to incur many lock contentions and
2067 * cycles for just a few buffers.
2068 * We don't bother with the passive area lock
2069 * here because we're just getting an
2070 * estimate. Also, we only pull from
2071 * the passive list in this function.
2072 */
2073
2074 /*
2075 * Use a replenish lock because we can do the
2076 * replenish operation at the end of
2077 * processing the rx_ring, but also when
2078 * we get buffers back from the upper
2079 * layers.
2080 */
2081 if (mutex_tryenter(&rx_ring->rx_replen_lock) == 0) {
2082 qede_info(rx_ring->qede, "!%s(%d): Failed to take"
2083 " replenish_lock",
2084 __func__, rx_ring->qede->instance);
2085 return;
2086 }
2087
2088 rx_buffer = qede_get_from_passive_list(rx_ring);
2089
2090 while (rx_buffer != NULL) {
2091 bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
2092 if (bd == NULL) {
2093 qede_info(rx_ring->qede, "!%s(%d): bd = null",
2094 __func__, rx_ring->qede->instance);
2095 qede_put_to_passive_list(rx_ring, rx_buffer);
2096 break;
2097 }
2098
2099 bd->addr.lo = HOST_TO_LE_32(U64_LO(
2100 rx_buffer->dma_info.phys_addr));
2101 bd->addr.hi = HOST_TO_LE_32(
2102 U64_HI(rx_buffer->dma_info.phys_addr));
2103
2104 /*
2105 * Put the buffer in active list since it will be
2106 * posted to fw now
2107 */
2108 qede_put_to_active_list(rx_ring, rx_buffer);
2109 rx_buffer->buf_state = RX_BUF_STATE_WITH_FW;
2110 count++;
2111 rx_buffer = qede_get_from_passive_list(rx_ring);
2112 }
2113 mutex_exit(&rx_ring->rx_replen_lock);
2114 }
2115
2116 /*
2117 * Put the rx_buffer to the passive_buf_list
2118 */
2119 int
2120 qede_put_to_passive_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2121 {
2122 qede_rx_buf_list_t *passive_buf_list =
2123 &rx_ring->rx_buf_area->passive_buf_list;
2124 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2125 int tail = 0;
2126
2127 mutex_enter(&passive_buf_list->lock);
2128
2129 tail = passive_buf_list->tail;
2130 passive_buf_list->tail = (passive_buf_list->tail + 1) & RX_RING_MASK;
2131
2132 rx_buf_area->passive_buf_list.buf_list[tail] = rx_buffer;
2133 atomic_inc_32(&passive_buf_list->num_entries);
2134
2135 if (passive_buf_list->num_entries > rx_ring->rx_buf_count) {
2136 /* Sanity check */
2137 qede_info(rx_ring->qede, "ERROR: num_entries (%d)"
2138 " > max count (%d)",
2139 passive_buf_list->num_entries,
2140 rx_ring->rx_buf_count);
2141 }
2142 mutex_exit(&passive_buf_list->lock);
2143 return (passive_buf_list->num_entries);
2144 }
2145
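/*
 * qede_recycle_rx_buffer() is the desballoc() free routine for loaned
 * receive buffers: when the stack frees an mblk we loaned out, STREAMS
 * invokes this callback instead of freeing the data buffer itself.
 * The ref_cnt settles the buffer's fate -- buffers start at 1 (set in
 * qede_alloc_rx_buffers()), are bumped when loaned upstream, and a
 * decrement back to 1 simply returns the buffer to the passive list,
 * while a decrement to 0 means the owning rx_buf_area was already torn
 * down and the buffer itself must be freed.  A rough sketch of the
 * lifecycle (illustrative only):
 *
 *	alloc:             ref_cnt = 1
 *	loan to stack:     ref_cnt = 2  (buf_upstream++)
 *	stack frees mblk:  ref_cnt = 1  -> passive list, re-posted later
 *	after unplumb:     ref_cnt = 0  -> free buffer, maybe buf_area
 */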
2146 void
2147 qede_recycle_rx_buffer(char *arg)
2148 {
2149 /* LINTED E_BAD_PTR_CAST_ALIGN */
2150 qede_rx_buffer_t *rx_buffer = (qede_rx_buffer_t *)arg;
2151 qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2152 qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2153 qede_t *qede = rx_ring->qede;
2154 u32 buf_upstream = 0, ref_cnt;
2155 u32 num_entries;
2156
2157 if (rx_buffer->ref_cnt == 0) {
2158 return;
2159 }
2160
2161 	/*
2162 	 * The mblk that pointed at this data buffer was freed by the
2163 	 * upper layer, so allocate a fresh one carrying the proper
2164 	 * free_func pointer.
2165 	 */
2166 rx_buffer->mp = desballoc(rx_buffer->dma_info.virt_addr,
2167 rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2168
2169 ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2170 if (ref_cnt == 1) {
2171 /* Put the buffer into passive_buf_list to be reused */
2172 num_entries = qede_put_to_passive_list(rx_ring, rx_buffer);
2173 		if (num_entries >= 32) {
2174 			if (mutex_tryenter(&rx_ring->rx_lock) != 0) {
2175 qede_replenish_rx_buffers(rx_ring);
2176 qede_update_rx_q_producer(rx_ring);
2177 mutex_exit(&rx_ring->rx_lock);
2178 }
2179 }
2180 } else if (ref_cnt == 0) {
2181 /*
2182 * This is a buffer from a previous load instance of
2183 * rx_buf_area. Free the rx_buffer and if no more
2184 * buffers are upstream from this rx_buf_area instance
2185 * then free the rx_buf_area;
2186 */
2187 if (rx_buffer->mp != NULL) {
2188 freemsg(rx_buffer->mp);
2189 rx_buffer->mp = NULL;
2190 }
2191 mutex_enter(&qede->drv_lock);
2192
2193 buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2194 if (buf_upstream >= 1) {
2195 atomic_dec_32(&rx_buf_area->buf_upstream);
2196 }
2197 if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2198 qede_free_rx_buffers_legacy(qede, rx_buf_area);
2199 }
2200
2201 mutex_exit(&qede->drv_lock);
2202 } else {
2203 /* Sanity check */
2204 qede_info(rx_ring->qede, "rx_buffer %p"
2205 " ref_cnt %d is invalid",
2206 rx_buffer, ref_cnt);
2207 }
2208 }
2209
2210 void
2211 qede_recycle_copied_rx_buffer(qede_rx_buffer_t *rx_buffer)
2212 {
2213 qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2214 qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2215 qede_t *qede = rx_ring->qede;
2216 u32 buf_upstream = 0, ref_cnt;
2217
2218 if (rx_buffer->ref_cnt == 0) {
2219 /*
2220 * Can happen if the buffer is being free'd
2221 * in the stop routine
2222 */
2223 qede_info(qede, "!%s(%d): rx_buffer->ref_cnt = 0",
2224 __func__, qede->instance);
2225 return;
2226 }
2227
2228 buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2229 if (buf_upstream >= 1) {
2230 atomic_dec_32(&rx_buf_area->buf_upstream);
2231 }
2232
2233 	/*
2234 	 * Normally the mblk that pointed at this data buffer was freed
2235 	 * by the upper layer and would need to be re-allocated with the
2236 	 * proper free_func pointer.  Here, though, the buffer may have
2237 	 * been recycled after a copy, in which case its mp is still
2238 	 * intact and no new mblk is allocated.
2239 	 */
2240
2241 ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2242 if (ref_cnt == 1) {
2243 qede_put_to_passive_list(rx_ring, rx_buffer);
2244 /* Put the buffer into passive_buf_list to be reused */
2245 } else if (ref_cnt == 0) {
2246 /*
2247 * This is a buffer from a previous load instance of
2248 * rx_buf_area. Free the rx_buffer and if no more
2249 * buffers are upstream from this rx_buf_area instance
2250 * then free the rx_buf_area;
2251 */
2252 qede_info(rx_ring->qede, "Free up rx_buffer %p, index %d"
2253 " ref_cnt %d from a previous driver iteration",
2254 rx_buffer, rx_buffer->index, ref_cnt);
2255 if (rx_buffer->mp != NULL) {
2256 freemsg(rx_buffer->mp);
2257 rx_buffer->mp = NULL;
2258 }
2259
2260 if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2261 mutex_enter(&qede->drv_lock);
2262 qede_free_rx_buffers_legacy(qede, rx_buf_area);
2263 mutex_exit(&qede->drv_lock);
2264 }
2265 } else {
2266 /* Sanity check */
2267 qede_info(rx_ring->qede, "rx_buffer %p"
2268 " ref_cnt %d is invalid",
2269 rx_buffer, ref_cnt);
2270 }
2271 }
2272
2273
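/*
 * Receive buffers are carved out of page-sized DMA allocations when
 * they fit: a single qede_dma_mem_alloc() call backs bufs_per_page
 * qede_rx_buffer_t entries, which share the same DMA and access
 * handles and differ only by offset.  Illustrative numbers only (the
 * actual DEFAULT_RX_BUF_SIZE value is assumed here, not quoted from
 * the header):
 *
 *	page_size = 4096, rx_buf_size = 2048
 *	bufs_per_page = 4096 / 2048 = 2
 *	buffer[0] at offset 0, buffer[1] at offset 2048
 *
 * Only the first buffer in each page owns the handles, which is why
 * the free path releases a page just once per bufs_per_page entries.
 */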
2274 static int
2275 qede_alloc_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
2276 {
2277 int ret = DDI_SUCCESS, i, j;
2278 qede_rx_buffer_t *rx_buffer;
2279 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2280 u32 bufs_per_page, buf_size;
2281 int page_size = (int)ddi_ptob(qede->dip, 1);
2282 qede_dma_info_t *dma_info;
2283 ddi_dma_cookie_t temp_cookie;
2284 int allocated = 0;
2285 u64 dma_addr;
2286 u8 *vaddr;
2287 ddi_dma_handle_t dma_handle;
2288 ddi_acc_handle_t acc_handle;
2289
2290 if (rx_ring->rx_buf_size > page_size) {
2291 bufs_per_page = 1;
2292 buf_size = rx_ring->rx_buf_size;
2293 } else {
2294 bufs_per_page =
2295 (page_size) / DEFAULT_RX_BUF_SIZE;
2296 buf_size = page_size;
2297 }
2298
2299 rx_buffer = &rx_buf_area->rx_buf_pool[0];
2300 rx_buf_area->bufs_per_page = bufs_per_page;
2301
2302 mutex_init(&rx_buf_area->active_buf_list.lock, NULL,
2303 MUTEX_DRIVER, 0);
2304 mutex_init(&rx_buf_area->passive_buf_list.lock, NULL,
2305 MUTEX_DRIVER, 0);
2306
2307 for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
2308 dma_info = &rx_buffer->dma_info;
2309
2310 ret = qede_dma_mem_alloc(qede,
2311 buf_size,
2312 DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2313 (caddr_t *)&dma_info->virt_addr,
2314 &temp_cookie,
2315 &dma_info->dma_handle,
2316 &dma_info->acc_handle,
2317 &qede_dma_attr_rxbuf,
2318 &qede_buf_acc_attr);
2319 if (ret != DDI_SUCCESS) {
2320 goto err;
2321 }
2322
2323 allocated++;
2324 vaddr = dma_info->virt_addr;
2325 dma_addr = temp_cookie.dmac_laddress;
2326 dma_handle = dma_info->dma_handle;
2327 acc_handle = dma_info->acc_handle;
2328
2329 for (j = 0; j < bufs_per_page; j++) {
2330 dma_info = &rx_buffer->dma_info;
2331 dma_info->virt_addr = vaddr;
2332 dma_info->phys_addr = dma_addr;
2333 dma_info->dma_handle = dma_handle;
2334 dma_info->acc_handle = acc_handle;
2335 dma_info->offset = j * rx_ring->rx_buf_size;
2336 /* Populate the recycle func and arg for the buffer */
2337 rx_buffer->recycle.free_func = qede_recycle_rx_buffer;
2338 rx_buffer->recycle.free_arg = (caddr_t)rx_buffer;
2339
2340 rx_buffer->mp = desballoc(dma_info->virt_addr,
2341 rx_ring->rx_buf_size, 0,
2342 &rx_buffer->recycle);
2343 if (rx_buffer->mp == NULL) {
2344 qede_warn(qede, "desballoc() failed, index %d",
2345 i);
2346 }
2347 rx_buffer->rx_ring = rx_ring;
2348 rx_buffer->rx_buf_area = rx_buf_area;
2349 rx_buffer->index = i + j;
2350 rx_buffer->ref_cnt = 1;
2351 rx_buffer++;
2352
2353 vaddr += rx_ring->rx_buf_size;
2354 dma_addr += rx_ring->rx_buf_size;
2355 }
2356 rx_ring->sw_rx_prod++;
2357 }
2358
2359 /*
2360 * Fill the rx_bd_ring with the allocated
2361 * buffers
2362 */
2363 ret = qede_init_bd(qede, rx_ring);
2364 if (ret != DDI_SUCCESS) {
2365 goto err;
2366 }
2367
2368 rx_buf_area->buf_upstream = 0;
2369
2370 return (ret);
2371 err:
2372 qede_free_rx_buffers(qede, rx_ring);
2373 return (ret);
2374 }
2375
2376 static int
2377 qede_alloc_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2378 {
2379 qede_rx_ring_t *rx_ring;
2380 qede_rx_buf_area_t *rx_buf_area;
2381 size_t size;
2382
2383 ASSERT(qede != NULL);
2384 ASSERT(fp != NULL);
2385
2386 rx_ring = fp->rx_ring;
2387
2388 atomic_inc_32(&qede->detach_unsafe);
2389 /*
2390 * Allocate rx_buf_area for the plumb instance
2391 */
2392 rx_buf_area = kmem_zalloc(sizeof (*rx_buf_area), KM_SLEEP);
2393 if (rx_buf_area == NULL) {
2394 qede_info(qede, "!%s(%d): Cannot alloc rx_buf_area",
2395 __func__, qede->instance);
2396 return (DDI_FAILURE);
2397 }
2398
2399 rx_buf_area->inactive = 0;
2400 rx_buf_area->rx_ring = rx_ring;
2401 rx_ring->rx_buf_area = rx_buf_area;
2402 /* Rx Buffer descriptor queue */
2403 if (ecore_chain_alloc(&qede->edev,
2404 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2405 ECORE_CHAIN_MODE_NEXT_PTR,
2406 ECORE_CHAIN_CNT_TYPE_U16,
2407 qede->rx_ring_size,
2408 sizeof (struct eth_rx_bd),
2409 &rx_ring->rx_bd_ring,
2410 NULL) != ECORE_SUCCESS) {
2411 cmn_err(CE_WARN, "Failed to allocate "
2412 		    "ecore bd chain");
2413 return (DDI_FAILURE);
2414 }
2415
2416 /* Rx Completion Descriptor queue */
2417 if (ecore_chain_alloc(&qede->edev,
2418 ECORE_CHAIN_USE_TO_CONSUME,
2419 ECORE_CHAIN_MODE_PBL,
2420 ECORE_CHAIN_CNT_TYPE_U16,
2421 qede->rx_ring_size,
2422 sizeof (union eth_rx_cqe),
2423 &rx_ring->rx_cqe_ring,
2424 NULL) != ECORE_SUCCESS) {
2425 cmn_err(CE_WARN, "Failed to allocate "
2426 		    "ecore cqe chain");
2427 return (DDI_FAILURE);
2428 }
2429
2430 /* Rx Data buffers */
2431 if (qede_alloc_rx_buffers(qede, rx_ring) != DDI_SUCCESS) {
2432 qede_print_err("!%s(%d): Failed to alloc rx buffers",
2433 __func__, qede->instance);
2434 return (DDI_FAILURE);
2435 }
2436 return (DDI_SUCCESS);
2437 }
2438
2439 static void
2440 qede_free_tx_bd_ring(qede_t *qede, qede_fastpath_t *fp)
2441 {
2442 int i;
2443 qede_tx_ring_t *tx_ring;
2444
2445 ASSERT(qede != NULL);
2446 ASSERT(fp != NULL);
2447
2448 for (i = 0; i < qede->num_tc; i++) {
2449 tx_ring = fp->tx_ring[i];
2450
2451 if (tx_ring->tx_bd_ring.p_virt_addr) {
2452 ecore_chain_free(&qede->edev, &tx_ring->tx_bd_ring);
2453 tx_ring->tx_bd_ring.p_virt_addr = NULL;
2454 }
2455 tx_ring->hw_cons_ptr = NULL;
2456 tx_ring->sw_tx_cons = 0;
2457 tx_ring->sw_tx_prod = 0;
2458
2459 }
2460 }
2461
2462 static u32
2463 qede_alloc_tx_bd_ring(qede_t *qede, qede_tx_ring_t *tx_ring)
2464 {
2465 u32 ret = 0;
2466
2467 ret = ecore_chain_alloc(&qede->edev,
2468 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2469 ECORE_CHAIN_MODE_PBL,
2470 ECORE_CHAIN_CNT_TYPE_U16,
2471 tx_ring->bd_ring_size,
2472 sizeof (union eth_tx_bd_types),
2473 &tx_ring->tx_bd_ring,
2474 NULL);
2475 if (ret) {
2476 cmn_err(CE_WARN, "!%s(%d): Failed to alloc tx bd chain",
2477 __func__, qede->instance);
2478 goto error;
2479 }
2480
2481
2482 error:
2483 return (ret);
2484 }
2485
2486 static void
2487 qede_free_tx_bcopy_buffers(qede_tx_ring_t *tx_ring)
2488 {
2489 qede_tx_bcopy_pkt_t *bcopy_pkt;
2490 int i;
2491
2492 for (i = 0; i < tx_ring->tx_ring_size; i++) {
2493 bcopy_pkt = &tx_ring->bcopy_list.bcopy_pool[i];
2494 		if (bcopy_pkt->dma_handle != NULL)
2495 			(void) ddi_dma_unbind_handle(bcopy_pkt->dma_handle);
2496 		if (bcopy_pkt->acc_handle != NULL) {
2497 			ddi_dma_mem_free(&bcopy_pkt->acc_handle);
2498 			bcopy_pkt->acc_handle = NULL;
2499 		}
2500 		if (bcopy_pkt->dma_handle != NULL) {
2501 			ddi_dma_free_handle(&bcopy_pkt->dma_handle);
2502 			bcopy_pkt->dma_handle = NULL;
2503 		}
2504 		if (bcopy_pkt->mp != NULL) {
2505 			freemsg(bcopy_pkt->mp);
2506 			bcopy_pkt->mp = NULL;
2507 		}
2509 }
2510
2511 if (tx_ring->bcopy_list.bcopy_pool != NULL) {
2512 kmem_free(tx_ring->bcopy_list.bcopy_pool,
2513 tx_ring->bcopy_list.size);
2514 tx_ring->bcopy_list.bcopy_pool = NULL;
2515 }
2516
2517 mutex_destroy(&tx_ring->bcopy_list.lock);
2518 }
2519
2520 static u32
2521 qede_alloc_tx_bcopy_buffers(qede_t *qede, qede_tx_ring_t *tx_ring)
2522 {
2523 u32 ret = DDI_SUCCESS;
2524 int page_size = (int)ddi_ptob(qede->dip, 1);
2525 size_t size;
2526 qede_tx_bcopy_pkt_t *bcopy_pkt, *bcopy_list;
2527 int i;
2528 qede_dma_info_t dma_info;
2529 ddi_dma_cookie_t temp_cookie;
2530
2531 	/*
2532 	 * If the tx buffer size is less than the page size,
2533 	 * try to use multiple copy buffers inside the
2534 	 * same page. Otherwise use the whole page (or more)
2535 	 * for the copy buffers.
2536 	 */
2537 if (qede->tx_buf_size > page_size) {
2538 size = qede->tx_buf_size;
2539 } else {
2540 size = page_size;
2541 }
2542
2543 size = sizeof (qede_tx_bcopy_pkt_t) * qede->tx_ring_size;
2544 bcopy_list = kmem_zalloc(size, KM_SLEEP);
2545 if (bcopy_list == NULL) {
2546 qede_warn(qede, "!%s(%d): Failed to allocate bcopy_list",
2547 __func__, qede->instance);
2548 ret = DDI_FAILURE;
2549 goto exit;
2550 }
2551
2552 tx_ring->bcopy_list.size = size;
2553 tx_ring->bcopy_list.bcopy_pool = bcopy_list;
2554 bcopy_pkt = bcopy_list;
2555
2556 tx_ring->bcopy_list.head = 0;
2557 tx_ring->bcopy_list.tail = 0;
2558 mutex_init(&tx_ring->bcopy_list.lock, NULL, MUTEX_DRIVER, 0);
2559
2560 for (i = 0; i < qede->tx_ring_size; i++) {
2561
2562 ret = qede_dma_mem_alloc(qede,
2563 qede->tx_buf_size,
2564 DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2565 (caddr_t *)&dma_info.virt_addr,
2566 &temp_cookie,
2567 &dma_info.dma_handle,
2568 &dma_info.acc_handle,
2569 &qede_dma_attr_txbuf,
2570 &qede_buf_acc_attr);
2571 		if (ret) {
2572 ret = DDI_FAILURE;
2573 goto exit;
2574 }
2575
2576
2577 bcopy_pkt->virt_addr = dma_info.virt_addr;
2578 bcopy_pkt->phys_addr = temp_cookie.dmac_laddress;
2579 bcopy_pkt->dma_handle = dma_info.dma_handle;
2580 bcopy_pkt->acc_handle = dma_info.acc_handle;
2581
2582 tx_ring->bcopy_list.free_list[i] = bcopy_pkt;
2583 bcopy_pkt++;
2584 }
2585
2586 exit:
2587 return (ret);
2588 }
2589
2590 static void
2591 qede_free_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2592 {
2593 qede_dma_handle_entry_t *dmah_entry;
2594 int i;
2595
2596 for (i = 0; i < tx_ring->tx_ring_size; i++) {
2597 dmah_entry = &tx_ring->dmah_list.dmah_pool[i];
2598 if (dmah_entry) {
2599 if (dmah_entry->dma_handle != NULL) {
2600 ddi_dma_free_handle(&dmah_entry->dma_handle);
2601 dmah_entry->dma_handle = NULL;
2602 } else {
2603 qede_info(qede, "dmah_entry %p, handle is NULL",
2604 dmah_entry);
2605 }
2606 }
2607 }
2608
2609 if (tx_ring->dmah_list.dmah_pool != NULL) {
2610 kmem_free(tx_ring->dmah_list.dmah_pool,
2611 tx_ring->dmah_list.size);
2612 tx_ring->dmah_list.dmah_pool = NULL;
2613 }
2614
2615 mutex_destroy(&tx_ring->dmah_list.lock);
2616 }
2617
2618 static u32
2619 qede_alloc_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2620 {
2621 int i;
2622 size_t size;
2623 u32 ret = DDI_SUCCESS;
2624 qede_dma_handle_entry_t *dmah_entry, *dmah_list;
2625
2626 size = sizeof (qede_dma_handle_entry_t) * qede->tx_ring_size;
2627 dmah_list = kmem_zalloc(size, KM_SLEEP);
2628 if (dmah_list == NULL) {
2629 		qede_warn(qede, "!%s(%d): Failed to allocate dmah_list",
2630 __func__, qede->instance);
2631 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2632 ret = DDI_FAILURE;
2633 goto exit;
2634 }
2635
2636 tx_ring->dmah_list.size = size;
2637 tx_ring->dmah_list.dmah_pool = dmah_list;
2638 dmah_entry = dmah_list;
2639
2640 tx_ring->dmah_list.head = 0;
2641 tx_ring->dmah_list.tail = 0;
2642 mutex_init(&tx_ring->dmah_list.lock, NULL, MUTEX_DRIVER, 0);
2643
2644 	/*
2645 	 * Pre-allocate one DMA handle per TX descriptor for mapped mode.
2646 	 */
2647 for (i = 0; i < qede->tx_ring_size; i++) {
2648 ret = ddi_dma_alloc_handle(qede->dip,
2649 &qede_tx_buf_dma_attr,
2650 DDI_DMA_DONTWAIT,
2651 NULL,
2652 &dmah_entry->dma_handle);
2653 if (ret != DDI_SUCCESS) {
2654 qede_print_err("!%s(%d): dma alloc handle failed "
2655 "for index %d",
2656 __func__, qede->instance, i);
2657 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2658 ret = DDI_FAILURE;
2659 goto exit;
2660 }
2661
2662 tx_ring->dmah_list.free_list[i] = dmah_entry;
2663 dmah_entry++;
2664 }
2665 exit:
2666 return (ret);
2667 }
2668
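/*
 * Transmit setup allocates resources for two send paths per TC ring: a
 * pool of pre-mapped copy ("bcopy") buffers for packets that are
 * cheaper to copy than to bind, and a pool of DMA handles used to bind
 * larger mblk chains directly ("mapped mode").  Which path a given
 * packet takes is decided at send time (not shown in this part of the
 * driver); the allocations below only size both pools to tx_ring_size
 * entries.
 */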
2669 static u32
2670 qede_alloc_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2671 {
2672 int i;
2673 qede_tx_ring_t *tx_ring;
2674 u32 ret = DDI_SUCCESS;
2675 size_t size;
2676 qede_tx_recycle_list_t *recycle_list;
2677
2678 ASSERT(qede != NULL);
2679 ASSERT(fp != NULL);
2680
2681 for (i = 0; i < qede->num_tc; i++) {
2682 tx_ring = fp->tx_ring[i];
2683 tx_ring->bd_ring_size = qede->tx_ring_size;
2684
2685 /*
2686 * Allocate the buffer descriptor chain
2687 */
2688 ret = qede_alloc_tx_bd_ring(qede, tx_ring);
2689 if (ret) {
2690 cmn_err(CE_WARN, "!%s(%d): failed, %s",
2691 __func__, qede->instance, qede_get_ddi_fail(ret));
2692 return (ret);
2693 }
2694
2695 /*
2696 * Allocate copy mode buffers
2697 */
2698 ret = qede_alloc_tx_bcopy_buffers(qede, tx_ring);
2699 if (ret) {
2700 qede_print_err("!%s(%d): Failed to alloc tx copy "
2701 "buffers", __func__, qede->instance);
2702 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2703 ret = DDI_FAILURE;
2704 goto exit;
2705 }
2706
2707 /*
2708 * Allocate dma handles for mapped mode
2709 */
2710 ret = qede_alloc_tx_dma_handles(qede, tx_ring);
2711 if (ret) {
2712 qede_print_err("!%s(%d): Failed to alloc tx dma "
2713 "handles", __func__, qede->instance);
2714 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2715 ret = DDI_FAILURE;
2716 goto exit;
2717 }
2718
2719 /* Allocate tx_recycle list */
2720 size = sizeof (qede_tx_recycle_list_t) * qede->tx_ring_size;
2721 recycle_list = kmem_zalloc(size, KM_SLEEP);
2722 if (recycle_list == NULL) {
2723 qede_warn(qede, "!%s(%d): Failed to allocate"
2724 " tx_recycle_list", __func__, qede->instance);
2725 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2726 ret = DDI_FAILURE;
2727 goto exit;
2728 }
2729
2730 tx_ring->tx_recycle_list = recycle_list;
2731 }
2732 exit:
2733 return (ret);
2734 }
2735
2736 static void
2737 /* LINTED E_FUNC_ARG_UNUSED */
2738 qede_free_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2739 {
2740 qede_pci_free_consistent(&fp->sb_dma_handle, &fp->sb_acc_handle);
2741 fp->sb_virt = NULL;
2742 fp->sb_phys = 0;
2743 }
2744
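/*
 * Each fastpath owns one hardware status block: a small DMA-coherent
 * structure that the device updates (for example with ring consumer
 * indices) and that ecore_int_sb_init() ties to this fastpath's
 * interrupt vector.  The sb_id to hwfn mapping below spreads the
 * status blocks across the available hardware engines.
 */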
2745 static int
2746 qede_alloc_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2747 {
2748 int status;
2749 int sb_id;
2750 struct ecore_dev *edev = &qede->edev;
2751 struct ecore_hwfn *p_hwfn;
2752 qede_vector_info_t *vect_info = fp->vect_info;
2753 ddi_dma_cookie_t sb_cookie;
2754
2755 ASSERT(qede != NULL);
2756 ASSERT(fp != NULL);
2757
2758 /*
2759 * In the case of multiple hardware engines,
2760 * interrupts are spread across all of them.
2761 * In the case of only one engine, all
2762 * interrupts are handled by that engine.
2763 * In the case of 2 engines, each has half
2764 * of the interrupts.
2765 */
2766 sb_id = vect_info->vect_index;
2767 p_hwfn = &edev->hwfns[sb_id % qede->num_hwfns];
2768
2769 /* Allocate dma mem. for status_block */
2770 status = qede_dma_mem_alloc(qede,
2771 sizeof (struct status_block),
2772 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2773 (caddr_t *)&fp->sb_virt,
2774 &sb_cookie,
2775 &fp->sb_dma_handle,
2776 &fp->sb_acc_handle,
2777 &qede_desc_dma_attr,
2778 &qede_desc_acc_attr);
2779
2780 if (status != DDI_SUCCESS) {
2781 qede_info(qede, "Failed to allocate status_block dma mem");
2782 return (status);
2783 }
2784
2785 fp->sb_phys = sb_cookie.dmac_laddress;
2786
2787
2788 status = ecore_int_sb_init(p_hwfn,
2789 p_hwfn->p_main_ptt,
2790 fp->sb_info,
2791 (void *)fp->sb_virt,
2792 fp->sb_phys,
2793 fp->fp_index);
2794 if (status != ECORE_SUCCESS) {
2795 cmn_err(CE_WARN, "Failed ecore_int_sb_init");
2796 return (DDI_FAILURE);
2797 }
2798
2799 return (status);
2800 }
2801
2802 static void
2803 qede_free_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2804 {
2805 qede_tx_ring_t *tx_ring;
2806 int i;
2807
2808 for (i = 0; i < qede->num_tc; i++) {
2809 tx_ring = fp->tx_ring[i];
2810 qede_free_tx_dma_handles(qede, tx_ring);
2811 qede_free_tx_bcopy_buffers(tx_ring);
2812 qede_free_tx_bd_ring(qede, fp);
2813
2814 if (tx_ring->tx_recycle_list) {
2815 kmem_free(tx_ring->tx_recycle_list,
2816 sizeof (qede_tx_recycle_list_t)
2817 * qede->tx_ring_size);
2818 }
2819 }
2820 }
2821
2822 static void
2823 qede_fastpath_free_phys_mem(qede_t *qede)
2824 {
2825 int i;
2826 qede_fastpath_t *fp;
2827
2828 for (i = 0; i < qede->num_fp; i++) {
2829 fp = &qede->fp_array[i];
2830
2831 qede_free_rx_ring_phys(qede, fp);
2832 qede_free_tx_ring_phys(qede, fp);
2833 qede_free_sb_phys(qede, fp);
2834 }
2835 }
2836
2837 /*
2838  * Save the dma_handles associated with the fastpath elements
2839  * allocated by ecore, for doing dma_sync in the fast path.
2840  */
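/*
 * A minimal sketch of how a saved handle is expected to be used on the
 * fast path (illustrative only; the real sync points, offsets and
 * lengths live in the receive/transmit code, not here):
 *
 *	(void) ddi_dma_sync(rx_ring->rx_cqe_dmah, 0, 0,
 *	    DDI_DMA_SYNC_FORKERNEL);
 */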
2841 static int
2842 qede_save_fp_dma_handles(qede_t *qede, qede_fastpath_t *fp)
2843 {
2844 int ret, i;
2845 qede_rx_ring_t *rx_ring;
2846 qede_tx_ring_t *tx_ring;
2847
2848 rx_ring = fp->rx_ring;
2849
2850 /* Rx bd ring dma_handle */
2851 ret = qede_osal_find_dma_handle_for_block(qede,
2852 (void *)rx_ring->rx_bd_ring.p_phys_addr,
2853 &rx_ring->rx_bd_dmah);
2854 if (ret != DDI_SUCCESS) {
2855 qede_print_err("!%s(%d): Cannot find dma_handle for "
2856 "rx_bd_ring, addr %p", __func__, qede->instance,
2857 rx_ring->rx_bd_ring.p_phys_addr);
2858 goto exit;
2859 }
2860
2861 /* rx cqe ring dma_handle */
2862 ret = qede_osal_find_dma_handle_for_block(qede,
2863 (void *)rx_ring->rx_cqe_ring.p_phys_addr,
2864 &rx_ring->rx_cqe_dmah);
2865 if (ret != DDI_SUCCESS) {
2866 qede_print_err("!%s(%d): Cannot find dma_handle for "
2867 "rx_cqe_ring, addr %p", __func__, qede->instance,
2868 rx_ring->rx_cqe_ring.p_phys_addr);
2869 goto exit;
2870 }
2871 /* rx cqe ring pbl */
2872 ret = qede_osal_find_dma_handle_for_block(qede,
2873 (void *)rx_ring->rx_cqe_ring.pbl_sp.p_phys_table,
2874 &rx_ring->rx_cqe_pbl_dmah);
2875 if (ret) {
2876 qede_print_err("!%s(%d): Cannot find dma_handle for "
2877 "rx_cqe pbl, addr %p", __func__, qede->instance,
2878 rx_ring->rx_cqe_ring.pbl_sp.p_phys_table);
2879 goto exit;
2880 }
2881
2882 /* tx_bd ring dma_handle(s) */
2883 for (i = 0; i < qede->num_tc; i++) {
2884 tx_ring = fp->tx_ring[i];
2885
2886 ret = qede_osal_find_dma_handle_for_block(qede,
2887 (void *)tx_ring->tx_bd_ring.p_phys_addr,
2888 &tx_ring->tx_bd_dmah);
2889 if (ret != DDI_SUCCESS) {
2890 qede_print_err("!%s(%d): Cannot find dma_handle "
2891 "for tx_bd_ring, addr %p", __func__,
2892 qede->instance,
2893 tx_ring->tx_bd_ring.p_phys_addr);
2894 goto exit;
2895 }
2896
2897 ret = qede_osal_find_dma_handle_for_block(qede,
2898 (void *)tx_ring->tx_bd_ring.pbl_sp.p_phys_table,
2899 &tx_ring->tx_pbl_dmah);
2900 if (ret) {
2901 qede_print_err("!%s(%d): Cannot find dma_handle for "
2902 "tx_bd pbl, addr %p", __func__, qede->instance,
2903 tx_ring->tx_bd_ring.pbl_sp.p_phys_table);
2904 goto exit;
2905 }
2906 }
2907
2908 exit:
2909 return (ret);
2910 }
2911
2912 int
2913 qede_fastpath_alloc_phys_mem(qede_t *qede)
2914 {
2915 int status = 0, i;
2916 qede_fastpath_t *fp;
2917
2918 for (i = 0; i < qede->num_fp; i++) {
2919 fp = &qede->fp_array[i];
2920
2921 status = qede_alloc_sb_phys(qede, fp);
2922 if (status != DDI_SUCCESS) {
2923 goto err;
2924 }
2925
2926 status = qede_alloc_rx_ring_phys(qede, fp);
2927 if (status != DDI_SUCCESS) {
2928 goto err;
2929 }
2930
2931 status = qede_alloc_tx_ring_phys(qede, fp);
2932 if (status != DDI_SUCCESS) {
2933 goto err;
2934 }
2935 status = qede_save_fp_dma_handles(qede, fp);
2936 if (status != DDI_SUCCESS) {
2937 goto err;
2938 }
2939 }
2940 return (status);
2941 err:
2942 qede_fastpath_free_phys_mem(qede);
2943 return (status);
2944 }
2945
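/*
 * Wire up the software fastpath structures: each qede_fastpath_t gets
 * its status block, interrupt vector info, one rx_ring and num_tc
 * tx_rings, plus per-queue parameters (buffer counts, copy thresholds,
 * doorbell address) copied from the qede_t defaults.  This only links
 * structures together; no DMA memory is touched until
 * qede_fastpath_alloc_phys_mem().
 */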
2946 static int
2947 qede_fastpath_config(qede_t *qede)
2948 {
2949 int i, j;
2950 qede_fastpath_t *fp;
2951 qede_rx_ring_t *rx_ring;
2952 qede_tx_ring_t *tx_ring;
2953 qede_vector_info_t *vect_info;
2954 int num_fp, num_hwfns;
2955
2956 ASSERT(qede != NULL);
2957
2958 num_fp = qede->num_fp;
2959 num_hwfns = qede->num_hwfns;
2960
2961 vect_info = &qede->intr_ctx.intr_vect_info[num_hwfns];
2962 fp = &qede->fp_array[0];
2963 tx_ring = &qede->tx_array[0][0];
2964
2965 for (i = 0; i < num_fp; i++, fp++, vect_info++) {
2966 fp->sb_info = &qede->sb_array[i];
2967 fp->qede = qede;
2968 fp->fp_index = i;
2969 		/*
2970 		 * With a single hwfn, the hwfn index should be zero
2971 		 * for all fp entries. If there are two engines, this
2972 		 * index should alternate between 0 and 1.
2973 		 */
2974 fp->fp_hw_eng_index = fp->fp_index % num_hwfns;
2975 fp->vport_id = 0;
2976 fp->stats_id = 0;
2977 fp->rss_id = fp->fp_index;
2978 fp->rx_queue_index = fp->fp_index;
2979 fp->vect_info = vect_info;
2980 		/*
2981 		 * After vport update, interrupts will be
2982 		 * running, so we need to initialize our
2983 		 * enable/disable gate as such.
2984 		 */
2985 fp->disabled_by_poll = 0;
2986
2987 /* rx_ring setup */
2988 rx_ring = &qede->rx_array[i];
2989 fp->rx_ring = rx_ring;
2990 rx_ring->fp = fp;
2991 rx_ring->rx_buf_count = qede->rx_buf_count;
2992 rx_ring->rx_buf_size = qede->rx_buf_size;
2993 rx_ring->qede = qede;
2994 rx_ring->sw_rx_cons = 0;
2995 rx_ring->rx_copy_threshold = qede->rx_copy_threshold;
2996 rx_ring->rx_low_buffer_threshold =
2997 qede->rx_low_buffer_threshold;
2998 rx_ring->queue_started = B_FALSE;
2999
3000 /* tx_ring setup */
3001 for (j = 0; j < qede->num_tc; j++) {
3002 tx_ring = &qede->tx_array[j][i];
3003 fp->tx_ring[j] = tx_ring;
3004 tx_ring->qede = qede;
3005 tx_ring->fp = fp;
3006 tx_ring->fp_idx = i;
3007 tx_ring->tx_queue_index = i * qede->num_fp +
3008 fp->fp_index;
3009 tx_ring->tx_buf_size = qede->tx_buf_size;
3010 tx_ring->tx_ring_size = qede->tx_ring_size;
3011 tx_ring->queue_started = B_FALSE;
3012 #ifdef DBLK_DMA_PREMAP
3013 tx_ring->pm_handle = qede->pm_handle;
3014 #endif
3015
3016 tx_ring->doorbell_addr =
3017 qede->doorbell;
3018 tx_ring->doorbell_handle =
3019 qede->doorbell_handle;
3020 }
3021 }
3022
3023 return (DDI_SUCCESS);
3024 }
3025
3026 /*
3027 * op = 1, Initialize link
3028 * op = 0, Destroy link
3029 */
3030 int
3031 qede_configure_link(qede_t *qede, bool op)
3032 {
3033 struct ecore_dev *edev = &qede->edev;
3034 struct ecore_hwfn *hwfn;
3035 struct ecore_ptt *ptt = NULL;
3036 int i, ret = DDI_SUCCESS;
3037
3038 for_each_hwfn(edev, i) {
3039 hwfn = &edev->hwfns[i];
3040 qede_info(qede, "Configuring link for hwfn#%d", i);
3041
3042 ptt = ecore_ptt_acquire(hwfn);
3043 if (ptt == NULL) {
3044 			qede_info(qede, "Cannot reserve ptt from ecore");
3045 ret = DDI_FAILURE;
3046 goto exit;
3047 }
3048
3049 ret = ecore_mcp_set_link(hwfn, ptt, op);
3050
3051 ecore_ptt_release(hwfn, ptt);
3052 if (ret) {
3053 /* if link config fails, make sure ptt is released */
3054 goto exit;
3055 }
3056 }
3057 exit:
3058 return (ret);
3059 }
3060
3061 /*
3062 * drv_lock must be held by the caller.
3063 */
3064 int
3065 qede_stop(qede_t *qede)
3066 {
3067 int status;
3068
3069 ASSERT(mutex_owned(&qede->drv_lock));
3070 qede->qede_state = QEDE_STATE_STOPPING;
3071
3072 mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3073
3074 qede_disable_all_fastpath_intrs(qede);
3075 status = qede_configure_link(qede, false /* Re-Set */);
3076 if (status) {
3077 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3078 cmn_err(CE_NOTE, "!%s(%d): Failed to reset link",
3079 __func__, qede->instance);
3080 return (status);
3081 }
3082 qede_clear_filters(qede);
3083 status = qede_fastpath_stop_queues(qede);
3084 if (status != DDI_SUCCESS) {
3085 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3086 cmn_err(CE_WARN, "qede_stop:"
3087 " qede_fastpath_stop_queues FAILED "
3088 " qede=%p\n",
3089 qede);
3090 return (status);
3091 }
3092
3093 qede_fastpath_free_phys_mem(qede);
3094
3095 qede->qede_state = QEDE_STATE_STOPPED;
3096 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3097 cmn_err(CE_WARN, "qede_stop SUCCESS =%p\n", qede);
3098 return (DDI_SUCCESS);
3099 }
3100
3101 /*
3102 * drv_lock must be held by the caller.
3103 */
3104 int
3105 qede_start(qede_t *qede)
3106 {
3107 int status;
3108
3109 ASSERT(mutex_owned(&qede->drv_lock));
3110
3111 qede->qede_state = QEDE_STATE_STARTING;
3112
3113 mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3114
3115 /*
3116 * Configure the fastpath blocks with
3117 * the sb_info, rx_ring and tx_rings
3118 */
3119 if (qede_fastpath_config(qede) != DDI_SUCCESS) {
3120 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3121 qede_print_err("!%s(%d): qede_fastpath_config failed",
3122 __func__, qede->instance);
3123 return (DDI_FAILURE);
3124 }
3125
3126
3127 /*
3128 * Allocate the physical memory
3129 * for fastpath.
3130 */
3131 status = qede_fastpath_alloc_phys_mem(qede);
3132 if (status) {
3133 cmn_err(CE_NOTE, "fastpath_alloc_phys_mem "
3134 " failed qede=%p\n", qede);
3135 return (DDI_FAILURE);
3136 }
3137
3138 status = qede_fastpath_start_queues(qede);
3139 if (status) {
3140 cmn_err(CE_NOTE, "fp_start_queues "
3141 " failed qede=%p\n", qede);
3142 goto err_out1;
3143 }
3144
3145 cmn_err(CE_NOTE, "qede_start fp_start_queues qede=%p\n", qede);
3146
3147 status = qede_configure_link(qede, true /* Set */);
3148 if (status) {
3149 cmn_err(CE_NOTE, "!%s(%d): Failed to configure link",
3150 __func__, qede->instance);
3151 goto err_out1;
3152 }
3153
3154 /*
3155 * Put interface in regular mode
3156 */
3157 if (qede_set_filter_rx_mode(qede,
3158 QEDE_FILTER_RX_MODE_REGULAR) != DDI_SUCCESS) {
3159 cmn_err(CE_NOTE, "!%s(%d): Failed to set filter mode",
3160 __func__, qede->instance);
3161 goto err_out1;
3162 }
3163
3164 status = qede_enable_all_fastpath_intrs(qede);
3165 if (status) {
3166 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3167 cmn_err(CE_NOTE, "!%s(%d): Failed to enable intrs",
3168 __func__, qede->instance);
3169 goto err_out2;
3170 }
3171 qede->qede_state = QEDE_STATE_STARTED;
3172 cmn_err(CE_NOTE, "!%s(%d): SUCCESS",
3173 __func__, qede->instance);
3174
3175 return (status);
3176
3177 err_out2:
3178 (void) qede_fastpath_stop_queues(qede);
3179 err_out1:
3180 qede_fastpath_free_phys_mem(qede);
3181 return (DDI_FAILURE);
3182 }
3183
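/*
 * Tear down whatever qede_attach() managed to set up.  Each successful
 * attach step sets a bit in qede->attach_resources, so this routine can
 * be called from both the attach failure path and detach and will only
 * undo the steps that actually completed, in roughly the reverse order
 * they were taken.
 */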
3184 static void
3185 qede_free_attach_resources(qede_t *qede)
3186 {
3187 struct ecore_dev *edev;
3188 int status;
3189
3190 edev = &qede->edev;
3191
3192 if (qede->attach_resources & QEDE_ECORE_HW_INIT) {
3193 if (ecore_hw_stop(edev) != ECORE_SUCCESS) {
3194 cmn_err(CE_NOTE, "%s(%d): ecore_hw_stop: failed\n",
3195 __func__, qede->instance);
3196 }
3197 qede->attach_resources &= ~QEDE_ECORE_HW_INIT;
3198 }
3199
3200 if (qede->attach_resources & QEDE_SP_INTR_ENBL) {
3201 status = qede_disable_slowpath_intrs(qede);
3202 if (status != DDI_SUCCESS) {
3203 qede_print("%s(%d): qede_disable_slowpath_intrs Failed",
3204 __func__, qede->instance);
3205 }
3206 qede->attach_resources &= ~QEDE_SP_INTR_ENBL;
3207 }
3208 if (qede->attach_resources & QEDE_KSTAT_INIT) {
3209 qede_kstat_fini(qede);
3210 qede->attach_resources &= ~QEDE_KSTAT_INIT;
3211 }
3212
3213
3214 if (qede->attach_resources & QEDE_GLD_INIT) {
3215 status = mac_unregister(qede->mac_handle);
3216 if (status != 0) {
3217 qede_print("%s(%d): mac_unregister Failed",
3218 __func__, qede->instance);
3219 }
3220 qede->attach_resources &= ~QEDE_GLD_INIT;
3221 }
3222
3223 if (qede->attach_resources & QEDE_EDEV_CONFIG) {
3224 ecore_resc_free(edev);
3225 qede->attach_resources &= ~QEDE_EDEV_CONFIG;
3226 }
3227
3228 if (qede->attach_resources & QEDE_INTR_CONFIG) {
3229 qede_unconfig_intrs(qede);
3230 qede->attach_resources &= ~QEDE_INTR_CONFIG;
3231 }
3232
3233 if (qede->attach_resources & QEDE_INTR_ALLOC) {
3234 qede_free_intrs(qede);
3235 qede->attach_resources &= ~QEDE_INTR_ALLOC;
3236 }
3237
3238 if (qede->attach_resources & QEDE_INIT_LOCKS) {
3239 qede_destroy_locks(qede);
3240 qede->attach_resources &= ~QEDE_INIT_LOCKS;
3241 }
3242
3243 if (qede->attach_resources & QEDE_IO_STRUCT_ALLOC) {
3244 qede_free_io_structs(qede);
3245 qede->attach_resources &= ~QEDE_IO_STRUCT_ALLOC;
3246 }
3247 #ifdef QEDE_LSR
3248 	if (qede->attach_resources & QEDE_CALLBACK) {
3249 		status = ddi_cb_unregister(qede->callback_hdl);
3250 		if (status != DDI_SUCCESS) {
3251 			qede_print("%s(%d): ddi_cb_unregister Failed",
3252 			    __func__, qede->instance);
3253 		}
3254 		qede->attach_resources &= ~QEDE_CALLBACK;
3255 }
3256 #endif
3257 if (qede->attach_resources & QEDE_ECORE_HW_PREP) {
3258 ecore_hw_remove(edev);
3259 qede->attach_resources &= ~QEDE_ECORE_HW_PREP;
3260 }
3261
3262 if (qede->attach_resources & QEDE_PCI) {
3263 qede_unconfig_pci(qede);
3264 qede->attach_resources &= ~QEDE_PCI;
3265 }
3266
3267 if (qede->attach_resources & QEDE_FM) {
3268 qede_unconfig_fm(qede);
3269 qede->attach_resources &= ~QEDE_FM;
3270 }
3271
3272 /*
3273 	 * Check for possible memory left behind by ecore
3274 */
3275 (void) qede_osal_cleanup(qede);
3276
3277 if (qede->attach_resources & QEDE_STRUCT_ALLOC) {
3278 ddi_set_driver_private(qede->dip, NULL);
3279 qede->attach_resources &= ~QEDE_STRUCT_ALLOC;
3280 kmem_free(qede, sizeof (qede_t));
3281 }
3282 }
3283
3284 /*
3285 * drv_lock must be held by the caller.
3286 */
3287 static int
3288 qede_suspend(qede_t *qede)
3289 {
3290 // STUB
3291 ASSERT(mutex_owned(&qede->drv_lock));
3292 printf("in qede_suspend\n");
3293 return (DDI_FAILURE);
3294 }
3295
3296 static int
3297 qede_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3298 {
3299 qede_t *qede;
3300 struct ecore_dev *edev;
3301 int instance;
3302 uint32_t vendor_id;
3303 uint32_t device_id;
3304 struct ecore_hwfn *p_hwfn;
3305 struct ecore_ptt *p_ptt;
3306 struct ecore_mcp_link_params *link_params;
3307 struct ecore_hw_init_params hw_init_params;
3308 struct ecore_drv_load_params load_params;
3309 int *props;
3310 uint32_t num_props;
3311 int rc = 0;
3312
3313 switch (cmd) {
3314 default:
3315 return (DDI_FAILURE);
3316
3317 case DDI_RESUME:
3318 {
3319 		qede = (qede_t *)ddi_get_driver_private(dip);
3320 		if (qede == NULL || qede->dip != dip) {
3321 			cmn_err(CE_NOTE, "qede:%s: Could not retrieve"
3322 			    " adapter structure\n", __func__);
3323 			return (DDI_FAILURE);
3324 }
3325
3326 mutex_enter(&qede->drv_lock);
3327 if (qede->qede_state != QEDE_STATE_SUSPENDED) {
3328 mutex_exit(&qede->drv_lock);
3329 return (DDI_FAILURE);
3330 }
3331
3332 if (qede_resume(qede) != DDI_SUCCESS) {
3333 cmn_err(CE_NOTE, "%s:%d resume operation failure\n",
3334 __func__, qede->instance);
3335 mutex_exit(&qede->drv_lock);
3336 return (DDI_FAILURE);
3337 }
3338
3339 qede->qede_state = QEDE_STATE_ATTACHED;
3340 mutex_exit(&qede->drv_lock);
3341 return (DDI_SUCCESS);
3342 }
3343 case DDI_ATTACH:
3344 {
3345 instance = ddi_get_instance(dip);
3346 cmn_err(CE_NOTE, "qede_attach(%d): Enter",
3347 instance);
3348
3349 		/* Allocate the per-instance adapter structure */
3350 if ((qede = kmem_zalloc(sizeof (qede_t), KM_SLEEP)) == NULL) {
3351 cmn_err(CE_NOTE, "!%s(%d): Could not allocate adapter "
3352 "structure\n", __func__, instance);
3353 return (DDI_FAILURE);
3354 }
3355
3356 qede->attach_resources |= QEDE_STRUCT_ALLOC;
3357 ddi_set_driver_private(dip, qede);
3358 qede->dip = dip;
3359 qede->instance = instance;
3360 snprintf(qede->name, sizeof (qede->name), "qede%d", instance);
3361 edev = &qede->edev;
3362
3363 if (qede_config_fm(qede) != DDI_SUCCESS) {
3364 goto exit_with_err;
3365 }
3366 qede->attach_resources |= QEDE_FM;
3367
3368 /*
3369 * Do PCI config setup and map the register
3370 * and doorbell space */
3371 if (qede_config_pci(qede) != DDI_SUCCESS) {
3372 goto exit_with_err;
3373 }
3374 qede->attach_resources |= QEDE_PCI;
3375
3376 /*
3377 * Setup OSAL mem alloc related locks.
3378 * Do not call any ecore functions without
3379 * initializing these locks
3380 */
3381 mutex_init(&qede->mem_list.mem_list_lock, NULL,
3382 MUTEX_DRIVER, 0);
3383 mutex_init(&qede->phys_mem_list.lock, NULL,
3384 MUTEX_DRIVER, 0);
3385 QEDE_INIT_LIST_HEAD(&qede->mem_list.mem_list_head);
3386 QEDE_INIT_LIST_HEAD(&qede->phys_mem_list.head);
3387 QEDE_INIT_LIST_HEAD(&qede->mclist.head);
3388
3389
3390 /*
3391 * FIXME: this function calls ecore api, but
3392 * dp_level and module are not yet set
3393 */
3394 if (qede_prepare_edev(qede) != ECORE_SUCCESS) {
3395 // report fma
3396 goto exit_with_err;
3397 }
3398
3399 qede->num_hwfns = edev->num_hwfns;
3400 qede->num_tc = 1;
3401 memcpy(qede->ether_addr, edev->hwfns->hw_info.hw_mac_addr,
3402 ETHERADDRL);
3403 qede_info(qede, "Interface mac_addr : " MAC_STRING,
3404 MACTOSTR(qede->ether_addr));
3405 qede->attach_resources |= QEDE_ECORE_HW_PREP;
3406
3407 if (qede_set_operating_params(qede) != DDI_SUCCESS) {
3408 goto exit_with_err;
3409 }
3410 qede->attach_resources |= QEDE_SET_PARAMS;
3411 #ifdef QEDE_LSR
3412 if (ddi_cb_register(qede->dip,
3413 qede->callback_flags,
3414 qede_callback,
3415 qede,
3416 NULL,
3417 &qede->callback_hdl)) {
3418 goto exit_with_err;
3419 }
3420 qede->attach_resources |= QEDE_CALLBACK;
3421 #endif
3422 qede_cfg_reset(qede);
3423
3424 if (qede_alloc_intrs(qede)) {
3425 cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3426 __func__);
3427 goto exit_with_err;
3428 }
3429
3430 qede->attach_resources |= QEDE_INTR_ALLOC;
3431
3432 if (qede_config_intrs(qede)) {
3433 cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3434 __func__);
3435 goto exit_with_err;
3436 }
3437 qede->attach_resources |= QEDE_INTR_CONFIG;
3438
3439 if (qede_alloc_io_structs(qede) != DDI_SUCCESS) {
3440 cmn_err(CE_NOTE, "%s: Could not allocate data"
3441 " path structures\n", __func__);
3442 goto exit_with_err;
3443 }
3444
3445 qede->attach_resources |= QEDE_IO_STRUCT_ALLOC;
3446
3447 /* Lock init cannot fail */
3448 qede_init_locks(qede);
3449 qede->attach_resources |= QEDE_INIT_LOCKS;
3450
3451
3452 if (qede_config_edev(qede)) {
3453 cmn_err(CE_NOTE, "%s: Could not configure ecore \n",
3454 __func__);
3455 goto exit_with_err;
3456 }
3457 qede->attach_resources |= QEDE_EDEV_CONFIG;
3458
3459 if (qede_kstat_init(qede) == B_FALSE) {
3460 cmn_err(CE_NOTE, "%s: Could not initialize kstat \n",
3461 __func__);
3462 goto exit_with_err;
3463
3464 }
3465 qede->attach_resources |= QEDE_KSTAT_INIT;
3466
3467 if (qede_gld_init(qede) == B_FALSE) {
3468 cmn_err(CE_NOTE, "%s: Failed call to qede_gld_init",
3469 __func__);
3470 goto exit_with_err;
3471 }
3472
3473 qede->attach_resources |= QEDE_GLD_INIT;
3474
3475 if (qede_enable_slowpath_intrs(qede)) {
3476 cmn_err(CE_NOTE, "%s: Could not enable interrupts\n",
3477 __func__);
3478 goto exit_with_err;
3479 }
3480
3481 qede->attach_resources |= QEDE_SP_INTR_ENBL;
3482
3483 cmn_err(CE_NOTE, "qede->attach_resources = %x\n",
3484 qede->attach_resources);
3485
3486 memset((void *)&hw_init_params, 0,
3487 sizeof (struct ecore_hw_init_params));
3488 hw_init_params.p_drv_load_params = &load_params;
3489
3490 hw_init_params.p_tunn = NULL;
3491 hw_init_params.b_hw_start = true;
3492 hw_init_params.int_mode = qede->intr_ctx.intr_mode;
3493 hw_init_params.allow_npar_tx_switch = false;
3494 hw_init_params.bin_fw_data = NULL;
3495 load_params.is_crash_kernel = false;
3496 load_params.mfw_timeout_val = 0;
3497 load_params.avoid_eng_reset = false;
3498 load_params.override_force_load =
3499 ECORE_OVERRIDE_FORCE_LOAD_NONE;
3500
3501 if (ecore_hw_init(edev, &hw_init_params) != ECORE_SUCCESS) {
3502 cmn_err(CE_NOTE,
3503 			    "%s: Could not initialize ecore block\n",
3504 __func__);
3505 goto exit_with_err;
3506 }
3507 qede->attach_resources |= QEDE_ECORE_HW_INIT;
3508 qede->qede_state = QEDE_STATE_ATTACHED;
3509
3510 qede->detach_unsafe = 0;
3511
3512 snprintf(qede->version,
3513 sizeof (qede->version),
3514 "%d.%d.%d",
3515 MAJVERSION,
3516 MINVERSION,
3517 REVVERSION);
3518
3519 snprintf(qede->versionFW,
3520 sizeof (qede->versionFW),
3521 "%d.%d.%d.%d",
3522 FW_MAJOR_VERSION,
3523 FW_MINOR_VERSION,
3524 FW_REVISION_VERSION,
3525 FW_ENGINEERING_VERSION);
3526
3527 p_hwfn = &qede->edev.hwfns[0];
3528 p_ptt = ecore_ptt_acquire(p_hwfn);
3529 		/*
3530 		 * Save the default link_input params for later link
3531 		 * configuration.
3532 		 */
3532 link_params = ecore_mcp_get_link_params(p_hwfn);
3533 memset(&qede->link_input_params, 0,
3534 sizeof (qede_link_input_params_t));
3535 memcpy(&qede->link_input_params.default_link_params,
3536 link_params,
3537 sizeof (struct ecore_mcp_link_params));
3538
3539 p_hwfn = ECORE_LEADING_HWFN(edev);
3540 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &qede->mfw_ver, NULL);
3541
3542 ecore_ptt_release(p_hwfn, p_ptt);
3543
3544 snprintf(qede->versionMFW,
3545 sizeof (qede->versionMFW),
3546 "%d.%d.%d.%d",
3547 (qede->mfw_ver >> 24) & 0xFF,
3548 (qede->mfw_ver >> 16) & 0xFF,
3549 (qede->mfw_ver >> 8) & 0xFF,
3550 qede->mfw_ver & 0xFF);
3551
3552 snprintf(qede->chip_name,
3553 sizeof (qede->chip_name),
3554 "%s",
3555 ECORE_IS_BB(edev) ? "BB" : "AH");
3556
3557 snprintf(qede->chipID,
3558 sizeof (qede->chipID),
3559 "0x%x",
3560 qede->edev.chip_num);
3561
3562 *qede->bus_dev_func = 0;
3563 vendor_id = 0;
3564 device_id = 0;
3565
3566
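		/*
		 * The first "reg" property entry encodes the PCI
		 * bus/device/function of this instance; the PCI_REG_*_G
		 * macros extract those fields so they can be reported
		 * below and by the QEDE_FUNC_INFO ioctl.
		 */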
3567 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3568 0, "reg", &props, &num_props);
3569 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3570
3571 snprintf(qede->bus_dev_func,
3572 sizeof (qede->bus_dev_func),
3573 "%04x:%02x:%02x",
3574 PCI_REG_BUS_G(props[0]),
3575 PCI_REG_DEV_G(props[0]),
3576 PCI_REG_FUNC_G(props[0]));
3577
3578 /*
3579 * This information is used
3580 * in the QEDE_FUNC_INFO ioctl
3581 */
3582 qede->pci_func = (uint8_t) PCI_REG_FUNC_G(props[0]);
3583
3584 ddi_prop_free(props);
3585
3586 }
3587
3588 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3589 0, "vendor-id", &props, &num_props);
3590 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3591 vendor_id = props[0];
3592 ddi_prop_free(props);
3593 }
3594 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3595 0, "device-id", &props, &num_props);
3596 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3597 device_id = props[0];
3598 ddi_prop_free(props);
3599 }
3600
3601
3602 snprintf(qede->vendor_device,
3603 sizeof (qede->vendor_device),
3604 "%04x:%04x",
3605 vendor_id,
3606 device_id);
3607
3608
3609 snprintf(qede->intrAlloc,
3610 sizeof (qede->intrAlloc), "%d %s",
3611 (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_FIXED)
3612 ? 1 :
3613 qede->intr_ctx.intr_vect_allocated,
3614 (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSIX)
3615 ? "MSIX" :
3616 (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSI)
3617 ? "MSI" : "Fixed");
3618
3619 qede_print("%s(%d): success, addr %p chip %s id %s intr %s\n",
3620 __func__, qede->instance, qede, qede->chip_name,
3621 		    qede->vendor_device, qede->intrAlloc);
3622
3623 qede_print("%s(%d): version %s FW %s MFW %s\n",
3624 __func__, qede->instance, qede->version,
3625 qede->versionFW, qede->versionMFW);
3626
3627 return (DDI_SUCCESS);
3628 }
3629 }
3630 exit_with_err:
3631 cmn_err(CE_WARN, "%s:%d failed %x\n", __func__, qede->instance,
3632 qede->attach_resources);
3633 	qede_free_attach_resources(qede);
3634 return (DDI_FAILURE);
3635 }
3636
3637 static int
3638 qede_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3639 {
3640
3641 qede_t *qede;
3642 int status;
3643 uint32_t count = 0;
3644
3645 qede = (qede_t *)ddi_get_driver_private(dip);
3646 if ((qede == NULL) || (qede->dip != dip)) {
3647 return (DDI_FAILURE);
3648 }
3649
3650 switch (cmd) {
3651 default:
3652 return (DDI_FAILURE);
3653 case DDI_SUSPEND:
3654 mutex_enter(&qede->drv_lock);
3655 status = qede_suspend(qede);
3656 if (status != DDI_SUCCESS) {
3657 mutex_exit(&qede->drv_lock);
3658 return (DDI_FAILURE);
3659 }
3660
3661 qede->qede_state = QEDE_STATE_SUSPENDED;
3662 mutex_exit(&qede->drv_lock);
3663 return (DDI_SUCCESS);
3664
3665 case DDI_DETACH:
3666 mutex_enter(&qede->drv_lock);
3667 if (qede->qede_state == QEDE_STATE_STARTED) {
3668 qede->plumbed = 0;
3669 status = qede_stop(qede);
3670 if (status != DDI_SUCCESS) {
3671 qede->qede_state = QEDE_STATE_FAILED;
3672 mutex_exit(&qede->drv_lock);
3673 return (DDI_FAILURE);
3674 }
3675 }
3676 mutex_exit(&qede->drv_lock);
3677 if (qede->detach_unsafe) {
3678 /*
3679 * wait for rx buffers to be returned from
3680 * upper layers
3681 */
3682 count = 0;
3683 while ((qede->detach_unsafe) && (count < 100)) {
3684 qede_delay(100);
3685 count++;
3686 }
3687 if (qede->detach_unsafe) {
3688 qede_info(qede, "!%s(%d) : Buffers still with"
3689 " OS, failing detach\n",
3690 qede->name, qede->instance);
3691 return (DDI_FAILURE);
3692 }
3693 }
3694 qede_free_attach_resources(qede);
3695 return (DDI_SUCCESS);
3696 }
3697 }
3698
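/*
 * quiesce(9E) entry point: called on fast reboot, when the device must
 * stop generating interrupts and DMA without going through the normal
 * detach path.  Triggering the management firmware's recovery process
 * (and allowing it a few seconds) is how this driver quietens the
 * hardware here.
 */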
3699 static int
3700 /* LINTED E_FUNC_ARG_UNUSED */
3701 qede_quiesce(dev_info_t *dip)
3702 {
3703 qede_t *qede = (qede_t *)ddi_get_driver_private(dip);
3704 struct ecore_dev *edev = &qede->edev;
3705 int status = DDI_SUCCESS;
3706 struct ecore_hwfn *p_hwfn;
3707 struct ecore_ptt *p_ptt = NULL;
3708
3709 mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3710 p_hwfn = ECORE_LEADING_HWFN(edev);
3711 p_ptt = ecore_ptt_acquire(p_hwfn);
3712 if (p_ptt) {
3713 status = ecore_start_recovery_process(p_hwfn, p_ptt);
3714 ecore_ptt_release(p_hwfn, p_ptt);
3715 OSAL_MSLEEP(5000);
3716 }
3717 return (status);
3718
3719 }
3720
3721
3722 DDI_DEFINE_STREAM_OPS(qede_dev_ops, nulldev, nulldev, qede_attach, qede_detach,
3723 nodev, NULL, D_MP, NULL, qede_quiesce);
3724
3725 static struct modldrv qede_modldrv =
3726 {
3727 &mod_driverops, /* drv_modops (must be mod_driverops for drivers) */
3728 QEDE_PRODUCT_INFO, /* drv_linkinfo (string displayed by modinfo) */
3729 &qede_dev_ops /* drv_dev_ops */
3730 };
3731
3732
3733 static struct modlinkage qede_modlinkage =
3734 {
3735 MODREV_1, /* ml_rev */
3736 (&qede_modldrv), /* ml_linkage */
3737 NULL /* NULL termination */
3738 };
3739
3740 int
3741 _init(void)
3742 {
3743 int rc;
3744
3745 qede_dev_ops.devo_cb_ops->cb_str = NULL;
3746 mac_init_ops(&qede_dev_ops, "qede");
3747
3748 /* Install module information with O/S */
3749 if ((rc = mod_install(&qede_modlinkage)) != DDI_SUCCESS) {
3750 mac_fini_ops(&qede_dev_ops);
3751 cmn_err(CE_NOTE, "mod_install failed");
3752 return (rc);
3753 }
3754
3755 return (rc);
3756 }
3757
3758
3759 int
3760 _fini(void)
3761 {
3762 int rc;
3763
3764 if ((rc = mod_remove(&qede_modlinkage)) == DDI_SUCCESS) {
3765 mac_fini_ops(&qede_dev_ops);
3766 }
3767
3768 return (rc);
3769 }
3770
3771
3772 int
3773 _info(struct modinfo *modinfop)
3774 {
3775 return (mod_info(&qede_modlinkage, modinfop));
3776 }
3777