1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1, (the "License").
26
27 * You may not use this file except in compliance with the License.
28
29 * You can obtain a copy of the License at available
30 * at http://opensource.org/licenses/CDDL-1.0
31
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35
36
37 #include "qede.h"
38
39 ddi_device_acc_attr_t qede_regs_acc_attr = {
40 DDI_DEVICE_ATTR_V1, // devacc_attr_version;
41 DDI_STRUCTURE_LE_ACC, // devacc_attr_endian_flags;
42 DDI_STRICTORDER_ACC, // devacc_attr_dataorder;
43 DDI_FLAGERR_ACC // devacc_attr_access;
44 };
45
46 ddi_device_acc_attr_t qede_desc_acc_attr = {
47 DDI_DEVICE_ATTR_V0, // devacc_attr_version;
48 DDI_STRUCTURE_LE_ACC, // devacc_attr_endian_flags;
49 DDI_STRICTORDER_ACC // devacc_attr_dataorder;
50 };
51
52 /*
53 * DMA access attributes for BUFFERS.
54 */
55 ddi_device_acc_attr_t qede_buf_acc_attr =
56 {
57 DDI_DEVICE_ATTR_V0, // devacc_attr_version;
58 DDI_NEVERSWAP_ACC, // devacc_attr_endian_flags;
59 DDI_STRICTORDER_ACC // devacc_attr_dataorder;
60 };
61
62
63 ddi_dma_attr_t qede_desc_dma_attr =
64 {
65 DMA_ATTR_V0,
66 0x0000000000000000ull,
67 0xFFFFFFFFFFFFFFFFull,
68 0x00000000FFFFFFFFull,
69 QEDE_PAGE_ALIGNMENT,
70 0x00000FFF,
71 0x00000001,
72 0x00000000FFFFFFFFull,
73 0xFFFFFFFFFFFFFFFFull,
74 1,
75 0x00000001,
76 DDI_DMA_FLAGERR
77 };
78
79 ddi_dma_attr_t qede_gen_buf_dma_attr =
80 {
81 DMA_ATTR_V0,
82 0x0000000000000000ull,
83 0xFFFFFFFFFFFFFFFFull,
84 0x00000000FFFFFFFFull,
85 QEDE_PAGE_ALIGNMENT,
86 0x00000FFF,
87 0x00000001,
88 0x00000000FFFFFFFFull,
89 0xFFFFFFFFFFFFFFFFull,
90 1,
91 0x00000001,
92 DDI_DMA_FLAGERR
93 };
94
95 /*
96 * DMA attributes for transmit.
97 */
98 ddi_dma_attr_t qede_tx_buf_dma_attr =
99 {
100 DMA_ATTR_V0,
101 0x0000000000000000ull,
102 0xFFFFFFFFFFFFFFFFull,
103 0x00000000FFFFFFFFull,
104 1,
105 0x00000FFF,
106 0x00000001,
107 0x00000000FFFFFFFFull,
108 0xFFFFFFFFFFFFFFFFull,
109 ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1,
110 0x00000001,
111 DDI_DMA_FLAGERR
112 };
113
114
115 ddi_dma_attr_t qede_dma_attr_desc =
116 {
117 DMA_ATTR_V0, /* dma_attr_version */
118 0, /* dma_attr_addr_lo */
119 0xffffffffffffffffull, /* dma_attr_addr_hi */
120 0x000fffffull, /* dma_attr_count_max */
121 4096, /* dma_attr_align */
122 0x000fffffull, /* dma_attr_burstsizes */
123 4, /* dma_attr_minxfer */
124 0xffffffffull, /* dma_attr_maxxfer */
125 0xffffffffull, /* dma_attr_seg */
126 1, /* dma_attr_sgllen */
127 1, /* dma_attr_granular */
128 DDI_DMA_FLAGERR /* dma_attr_flags */
129 };
130
131 static ddi_dma_attr_t qede_dma_attr_txbuf =
132 {
133 DMA_ATTR_V0, /* dma_attr_version */
134 0, /* dma_attr_addr_lo */
135 0xffffffffffffffffull, /* dma_attr_addr_hi */
136 0x00000000FFFFFFFFull, /* dma_attr_count_max */
137 QEDE_PAGE_ALIGNMENT, /* dma_attr_align */
138 0xfff8ull, /* dma_attr_burstsizes */
139 1, /* dma_attr_minxfer */
140 0xffffffffull, /* dma_attr_maxxfer */
141 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
142 1, /* dma_attr_sgllen */
143 1, /* dma_attr_granular */
144 0 /* dma_attr_flags */
145 };
146
147 ddi_dma_attr_t qede_dma_attr_rxbuf =
148 {
149 DMA_ATTR_V0, /* dma_attr_version */
150 0, /* dma_attr_addr_lo */
151 0xffffffffffffffffull, /* dma_attr_addr_hi */
152 0x00000000FFFFFFFFull, /* dma counter max */
153 QEDE_PAGE_ALIGNMENT, /* dma_attr_align */
154 0xfff8ull, /* dma_attr_burstsizes */
155 1, /* dma_attr_minxfer */
156 0xffffffffull, /* dma_attr_maxxfer */
157 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
158 1, /* dma_attr_sgllen */
159 1, /* dma_attr_granular */
160 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
161 };
162
163 /* LINTED E_STATIC_UNUSED */
164 static ddi_dma_attr_t qede_dma_attr_cmddesc =
165 {
166 DMA_ATTR_V0, /* dma_attr_version */
167 0, /* dma_attr_addr_lo */
168 0xffffffffffffffffull, /* dma_attr_addr_hi */
169 0xffffffffull, /* dma_attr_count_max */
170 1, /* dma_attr_align */
171 0xfff8ull, /* dma_attr_burstsizes */
172 1, /* dma_attr_minxfer */
173 0xffffffff, /* dma_attr_maxxfer */
174 0xffffffff, /* dma_attr_seg */
175 ETH_TX_MAX_BDS_PER_NON_LSO_PACKET, /* dma_attr_sgllen */
176 1, /* dma_attr_granular */
177 0 /* dma_attr_flags */
178 };
179
180
181
182 /*
183 * Generic dma attribute for single sg
184 */
185 /* LINTED E_STATIC_UNUSED */
186 static ddi_dma_attr_t qede_gen_dma_attr_desc =
187 {
188 DMA_ATTR_V0, /* dma_attr_version */
189 0, /* dma_attr_addr_lo */
190 0xffffffffffffffffull, /* dma_attr_addr_hi */
191 0x000fffffull, /* dma_attr_count_max */
192 4096, /* dma_attr_align */
193 0x000fffffull, /* dma_attr_burstsizes */
194 4, /* dma_attr_minxfer */
195 0xffffffffull, /* dma_attr_maxxfer */
196 0xffffffffull, /* dma_attr_seg */
197 1, /* dma_attr_sgllen */
198 1, /* dma_attr_granular */
199 DDI_DMA_FLAGERR /* dma_attr_flags */
200 };
201
202 ddi_dma_attr_t qede_buf2k_dma_attr_txbuf =
203 {
204 DMA_ATTR_V0, /* dma_attr_version */
205 0, /* dma_attr_addr_lo */
206 0xffffffffffffffffull, /* dma_attr_addr_hi */
207 0x00000000FFFFFFFFull, /* dma_attr_count_max */
208 BUF_2K_ALIGNMENT, /* dma_attr_align */
209 0xfff8ull, /* dma_attr_burstsizes */
210 1, /* dma_attr_minxfer */
211 0xffffffffull, /* dma_attr_maxxfer */
212 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
213 1, /* dma_attr_sgllen */
214 0x00000001, /* dma_attr_granular */
215 0 /* dma_attr_flags */
216 };
217
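/*
 * Return the symbolic name of a DDI status code for use in log messages.
 */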
218 char *
qede_get_ddi_fail(int status)
220 {
221 switch (status) {
222 case DDI_FAILURE:
223 return ("DDI_FAILURE");
224 case DDI_NOT_WELL_FORMED:
225 return ("DDI_NOT_WELL_FORMED");
226 case DDI_EAGAIN:
227 return ("DDI_EAGAIN");
228 case DDI_EINVAL:
229 return ("DDI_EINVAL");
230 case DDI_ENOTSUP:
231 return ("DDI_ENOTSUP");
232 case DDI_EPENDING:
233 return ("DDI_EPENDING");
234 case DDI_EALREADY:
235 return ("DDI_EALREADY");
236 case DDI_ENOMEM:
237 return ("DDI_ENOMEM");
238 case DDI_EBUSY:
239 return ("DDI_EBUSY");
240 case DDI_ETRANSPORT:
241 return ("DDI_ETRANSPORT");
242 case DDI_ECONTEXT:
243 return ("DDI_ECONTEXT");
244 default:
245 return ("ERROR CODE NOT FOUND!");
246 }
247 }
248
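/*
 * Return the symbolic name of an ecore status code for use in log messages.
 */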
249 char *
qede_get_ecore_fail(int status)
251 {
252 switch (status) {
253 case ECORE_UNKNOWN_ERROR:
254 return ("ECORE_UNKNOWN_ERROR");
255 case ECORE_NORESOURCES:
256 return ("ECORE_NORESOURCES");
257 case ECORE_NODEV:
258 return ("ECORE_NODEV");
259 case ECORE_ABORTED:
260 return ("ECORE_ABORTED");
261 case ECORE_AGAIN:
262 return ("ECORE_AGAIN");
263 case ECORE_NOTIMPL:
264 return ("ECORE_NOTIMPL");
265 case ECORE_EXISTS:
266 return ("ECORE_EXISTS");
267 case ECORE_IO:
268 return ("ECORE_IO");
269 case ECORE_TIMEOUT:
270 return ("ECORE_TIMEOUT");
271 case ECORE_INVAL:
272 return ("ECORE_INVAL");
273 case ECORE_BUSY:
274 return ("ECORE_BUSY");
275 case ECORE_NOMEM:
276 return ("ECORE_NOMEM");
277 case ECORE_SUCCESS:
278 return ("ECORE_SUCCESS");
279 case ECORE_PENDING:
280 return ("ECORE_PENDING");
281 default:
282 return ("ECORE ERROR CODE NOT FOUND!");
283 }
284 }
285
286 #define QEDE_CHIP_NUM(_p)\
287 (((_p)->edev.chip_num) & 0xffff)
288
289 char *
qede_chip_name(qede_t *qede)
291 {
292 switch (QEDE_CHIP_NUM(qede)) {
293 case 0x1634:
294 return ("BCM57980E");
295
296 case 0x1629:
297 return ("BCM57980S");
298
299 case 0x1630:
300 return ("BCM57940_KR2");
301
302 case 0x8070:
303 return ("ARROWHEAD");
304
305 case 0x8071:
306 return ("ARROWHEAD");
307
308 case 0x8072:
309 return ("ARROWHEAD");
310
311 case 0x8073:
312 return ("ARROWHEAD");
313
314 default:
315 return ("UNKNOWN");
316 }
317 }
318
319
320
321
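/*
 * Destroy the driver-level, fastpath, rx-ring and tx-ring mutexes.
 */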
322 static void
qede_destroy_locks(qede_t *qede)
324 {
325 qede_fastpath_t *fp = &qede->fp_array[0];
326 qede_rx_ring_t *rx_ring;
327 qede_tx_ring_t *tx_ring;
328 int i, j;
329
330 mutex_destroy(&qede->drv_lock);
331 mutex_destroy(&qede->watch_lock);
332
333 for (i = 0; i < qede->num_fp; i++, fp++) {
334 mutex_destroy(&fp->fp_lock);
335
336 rx_ring = fp->rx_ring;
337 mutex_destroy(&rx_ring->rx_lock);
338 mutex_destroy(&rx_ring->rx_replen_lock);
339
340 for (j = 0; j < qede->num_tc; j++) {
341 tx_ring = fp->tx_ring[j];
342 mutex_destroy(&tx_ring->tx_lock);
343 }
344 }
345 mutex_destroy(&qede->gld_lock);
346 mutex_destroy(&qede->kstat_lock);
347 }
348
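/*
 * Initialize the driver-level, fastpath, rx-ring and tx-ring mutexes
 * at the interrupt priority assigned to this instance.
 */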
349 static void
qede_init_locks(qede_t *qede)
351 {
352 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
353 qede_fastpath_t *fp = &qede->fp_array[0];
354 qede_rx_ring_t *rx_ring;
355 qede_tx_ring_t *tx_ring;
356 int i, tc;
357
358 mutex_init(&qede->drv_lock, NULL,
359 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
360 mutex_init(&qede->watch_lock, NULL,
361 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
362
363 for (i = 0; i < qede->num_fp; i++, fp++) {
364 mutex_init(&fp->fp_lock, NULL,
365 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
366
367 rx_ring = fp->rx_ring;
368 mutex_init(&rx_ring->rx_lock, NULL,
369 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
370 mutex_init(&rx_ring->rx_replen_lock, NULL,
371 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
372
373 for (tc = 0; tc < qede->num_tc; tc++) {
374 tx_ring = fp->tx_ring[tc];
375 mutex_init(&tx_ring->tx_lock, NULL,
376 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
377 }
378 }
379
380 mutex_init(&qede->gld_lock, NULL,
381 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
382 mutex_init(&qede->kstat_lock, NULL,
383 MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
384 }
385
386 /* LINTED E_FUNC_ARG_UNUSED */
static void qede_free_io_structs(qede_t *qede)
388 {
389 }
390
391 static int
qede_alloc_io_structs(qede_t *qede)
393 {
394 qede_fastpath_t *fp;
395 qede_rx_ring_t *rx_ring;
396 qede_tx_ring_t *tx_array, *tx_ring;
397 int i, tc;
398
399 /*
400 * Put rx ring + tx_ring pointers paired
401 * into the fp data structure array
402 */
403 for (i = 0; i < qede->num_fp; i++) {
404 fp = &qede->fp_array[i];
405 rx_ring = &qede->rx_array[i];
406
407 for (tc = 0; tc < qede->num_tc; tc++) {
408 tx_array = qede->tx_array[tc];
409 tx_ring = &tx_array[i];
410 fp->tx_ring[tc] = tx_ring;
411 }
412
413 fp->rx_ring = rx_ring;
414 rx_ring->group_index = 0;
415 }
416
417 return (DDI_SUCCESS);
418 }
419
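/*
 * Pick up the qede.conf tunables and derive the per-instance
 * configuration (ring sizes, buffer sizes, offload flags).
 */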
420 static int
qede_get_config_params(qede_t *qede)
422 {
423 struct ecore_dev *edev = &qede->edev;
424
425 qede_cfg_init(qede);
426
427 qede->num_tc = DEFAULT_TRFK_CLASS_COUNT;
428 qede->num_hwfns = edev->num_hwfns;
429 qede->rx_buf_count = qede->rx_ring_size;
430 qede->rx_buf_size = DEFAULT_RX_BUF_SIZE;
431 qede_print("!%s:%d: qede->num_fp = %d\n", __func__, qede->instance,
432 qede->num_fp);
433 qede_print("!%s:%d: qede->rx_ring_size = %d\n", __func__,
434 qede->instance, qede->rx_ring_size);
435 qede_print("!%s:%d: qede->rx_buf_count = %d\n", __func__,
436 qede->instance, qede->rx_buf_count);
437 qede_print("!%s:%d: qede->rx_buf_size = %d\n", __func__,
438 qede->instance, qede->rx_buf_size);
439 qede_print("!%s:%d: qede->rx_copy_threshold = %d\n", __func__,
440 qede->instance, qede->rx_copy_threshold);
441 qede_print("!%s:%d: qede->tx_ring_size = %d\n", __func__,
442 qede->instance, qede->tx_ring_size);
	qede_print("!%s:%d: qede->tx_bcopy_threshold = %d\n", __func__,
444 qede->instance, qede->tx_bcopy_threshold);
445 qede_print("!%s:%d: qede->lso_enable = %d\n", __func__,
446 qede->instance, qede->lso_enable);
447 qede_print("!%s:%d: qede->lro_enable = %d\n", __func__,
448 qede->instance, qede->lro_enable);
449 qede_print("!%s:%d: qede->jumbo_enable = %d\n", __func__,
450 qede->instance, qede->jumbo_enable);
451 qede_print("!%s:%d: qede->log_enable = %d\n", __func__,
452 qede->instance, qede->log_enable);
453 qede_print("!%s:%d: qede->checksum = %d\n", __func__,
454 qede->instance, qede->checksum);
	qede_print("!%s:%d: qede->ecore_debug_level = 0x%x\n", __func__,
456 qede->instance, qede->ecore_debug_level);
457 qede_print("!%s:%d: qede->num_hwfns = %d\n", __func__,
	    qede->instance, qede->num_hwfns);
459
460 //qede->tx_buf_size = qede->mtu + QEDE_MAX_ETHER_HDR;
461 qede->tx_buf_size = BUF_2K_SIZE;
462 return (DDI_SUCCESS);
463 }
464
465 void
qede_config_debug(qede_t *qede)
467 {
468
469 struct ecore_dev *edev = &qede->edev;
470 u32 dp_level = 0;
471 u8 dp_module = 0;
472
473 dp_level = qede->ecore_debug_level;
474 dp_module = qede->ecore_debug_module;
475 ecore_init_dp(edev, dp_module, dp_level, NULL);
476 }
477
478
479
480 static int
qede_set_operating_params(qede_t *qede)
482 {
483 int status = 0;
484 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
485
	/* Get qede.conf parameters from user */
487 status = qede_get_config_params(qede);
488 if (status != DDI_SUCCESS) {
489 return (DDI_FAILURE);
490 }
491 /* config debug level */
492 qede_config_debug(qede);
493
494
495 intr_ctx->intr_vect_to_request =
496 qede->num_fp + qede->num_hwfns;
497 intr_ctx->intr_fp_vector_count = qede->num_fp - qede->num_hwfns;
498
499 /* set max number of Unicast list */
500 qede->ucst_total = QEDE_MAX_UCST_CNT;
501 qede->ucst_avail = QEDE_MAX_UCST_CNT;
502 bzero(&qede->ucst_mac[0], sizeof (qede_mac_addr_t) * qede->ucst_total);
503 qede->params.multi_promisc_fl = B_FALSE;
504 qede->params.promisc_fl = B_FALSE;
505 qede->mc_cnt = 0;
506 qede->rx_low_buffer_threshold = RX_LOW_BUFFER_THRESHOLD;
507
508 return (status);
509 }
510
511 /* Resume the interface */
512 static int
qede_resume(qede_t *qede)
514 {
515 mutex_enter(&qede->drv_lock);
516 cmn_err(CE_NOTE, "%s:%d Enter\n", __func__, qede->instance);
517 qede->qede_state = QEDE_STATE_ATTACHED;
518 mutex_exit(&qede->drv_lock);
519 return (DDI_FAILURE);
520 }
521
522 /*
523 * Write dword to doorbell from tx_path
524 * Avoid use of qede_t * pointer
525 */
526 #pragma inline(qede_bar2_write32_tx_doorbell)
527 void
qede_bar2_write32_tx_doorbell(qede_tx_ring_t *tx_ring, u32 val)
529 {
530 u64 addr = (u64)tx_ring->doorbell_addr;
531 ddi_put32(tx_ring->doorbell_handle, (u32 *)addr, val);
532 }
533
534 static void
qede_unconfig_pci(qede_t *qede)
536 {
537 if (qede->doorbell_handle != NULL) {
538 ddi_regs_map_free(&(qede->doorbell_handle));
539 qede->doorbell_handle = NULL;
540 }
541
542 if (qede->regs_handle != NULL) {
543 ddi_regs_map_free(&qede->regs_handle);
544 qede->regs_handle = NULL;
545 }
546 if (qede->pci_cfg_handle != NULL) {
547 pci_config_teardown(&qede->pci_cfg_handle);
548 qede->pci_cfg_handle = NULL;
549 }
550 }
551
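/*
 * Set up PCI config space access and map BAR0 (device registers)
 * and BAR2 (doorbells).
 */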
552 static int
qede_config_pci(qede_t *qede)
554 {
555 int ret;
556
557 ret = pci_config_setup(qede->dip, &qede->pci_cfg_handle);
558 if (ret != DDI_SUCCESS) {
559 cmn_err(CE_NOTE, "%s:%d Failed to get PCI config handle\n",
560 __func__, qede->instance);
561 return (DDI_FAILURE);
562 }
563
564 /* get register size */
565 ret = ddi_dev_regsize(qede->dip, 1, &qede->regview_size);
566 if (ret != DDI_SUCCESS) {
567 cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0",
568 __func__, qede->instance);
569 goto err_exit;
570 }
571
572 /* get doorbell size */
573 ret = ddi_dev_regsize(qede->dip, 3, &qede->doorbell_size);
574 if (ret != DDI_SUCCESS) {
575 cmn_err(CE_WARN, "%s%d: failed to read doorbell size for bar2",
576 __func__, qede->instance);
577 goto err_exit;
578 }
579
580 /* map register space */
581 ret = ddi_regs_map_setup(
582 /* Pointer to the device's dev_info structure. */
583 qede->dip,
584 /*
585 * Index number to the register address space set.
586 * A value of 0 indicates PCI configuration space,
587 * while a value of 1 indicates the real start of
588 * device register sets.
589 */
590 1,
591 /*
592 * A platform-dependent value that, when added to
593 * an offset that is less than or equal to the len
594 * parameter (see below), is used for the dev_addr
595 * argument to the ddi_get, ddi_mem_get, and
596 * ddi_io_get/put routines.
597 */
598 &qede->regview,
599 /*
600 * Offset into the register address space.
601 */
602 0,
603 /* Length to be mapped. */
604 qede->regview_size,
605 /*
606 * Pointer to a device access attribute structure
607 * of this mapping.
608 */
609 &qede_regs_acc_attr,
610 /* Pointer to a data access handle. */
611 &qede->regs_handle);
612
613 if (ret != DDI_SUCCESS) {
614 cmn_err(CE_WARN, "!qede(%d): failed to map registers, err %d",
615 qede->instance, ret);
616 goto err_exit;
617 }
618
619 qede->pci_bar0_base = (unsigned long)qede->regview;
620
621 /* map doorbell space */
622 ret = ddi_regs_map_setup(qede->dip,
623 2,
624 &qede->doorbell,
625 0,
626 qede->doorbell_size,
627 &qede_regs_acc_attr,
628 &qede->doorbell_handle);
629
630 if (ret != DDI_SUCCESS) {
631 cmn_err(CE_WARN, "qede%d: failed to map doorbell, err %d",
632 qede->instance, ret);
633 goto err_exit;
634 }
635
636 qede->pci_bar2_base = (unsigned long)qede->doorbell;
637
638 return (ret);
639 err_exit:
640 qede_unconfig_pci(qede);
641 return (DDI_FAILURE);
642 }
643
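/*
 * Slowpath interrupt handler. One MSI-X vector is assigned per
 * hw-function; the slowpath work is processed by ecore_int_sp_dpc().
 */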
644 static uint_t
qede_sp_handler(caddr_t arg1, caddr_t arg2)
646 {
647 /*LINTED E_BAD_PTR_CAST_ALIGN*/
648 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)arg1;
649 /* LINTED E_BAD_PTR_CAST_ALIGN */
650 qede_vector_info_t *vect_info = (qede_vector_info_t *)arg2;
651 struct ecore_dev *edev = p_hwfn->p_dev;
652 qede_t *qede = (qede_t *)edev;
653
654 if ((arg1 == NULL) || (arg2 == NULL)) {
655 cmn_err(CE_WARN, "qede_sp_handler: invalid parameters");
656 /*
657 * MSIX intr should always
658 * return DDI_INTR_CLAIMED
659 */
660 return (DDI_INTR_CLAIMED);
661 }
662
663
664 vect_info->in_isr = B_TRUE;
665
666 atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
667 qede->intrSbCnt[vect_info->vect_index]++;
668
669
670 ecore_int_sp_dpc((osal_int_ptr_t)p_hwfn);
671
672 vect_info->in_isr = B_FALSE;
673
674 return (DDI_INTR_CLAIMED);
675 }
676
677 void
qede_enable_hw_intr(qede_fastpath_t *fp)
679 {
680 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
681 ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
682 }
683
684 void
qede_disable_hw_intr(qede_fastpath_t *fp)
686 {
687 ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
688 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
689 }
690
691
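/*
 * Fastpath interrupt handler. Processes rx/tx completions for the
 * fastpath that owns this vector and passes received packets to MAC.
 */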
692 static uint_t
qede_fp_handler(caddr_t arg1, caddr_t arg2)
694 {
695 /* LINTED E_BAD_PTR_CAST_ALIGN */
696 qede_vector_info_t *vect_info = (qede_vector_info_t *)arg1;
697 /* LINTED E_BAD_PTR_CAST_ALIGN */
698 qede_t *qede = (qede_t *)arg2;
699 qede_fastpath_t *fp;
700 qede_rx_ring_t *rx_ring;
701 mblk_t *mp;
702 int work_done = 0;
703
704 if ((vect_info == NULL) || (vect_info->fp == NULL)) {
705 cmn_err(CE_WARN, "qede_fp_handler: invalid parameters");
706 return (DDI_INTR_UNCLAIMED);
707 }
708
709 fp = (qede_fastpath_t *)vect_info->fp;
710 rx_ring = fp->rx_ring;
711
712 mutex_enter(&fp->fp_lock);
713
714 atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
715 qede->intrSbCnt[vect_info->vect_index]++;
716
717 mutex_enter(&fp->qede->drv_lock);
718 qede_disable_hw_intr(fp);
719 mutex_exit(&fp->qede->drv_lock);
720
721 mp = qede_process_fastpath(fp, QEDE_POLL_ALL,
722 QEDE_MAX_RX_PKTS_PER_INTR, &work_done);
723
724 if (mp)
725 #ifndef NO_CROSSBOW
726 {
727 mac_rx_ring(rx_ring->qede->mac_handle,
728 rx_ring->mac_ring_handle,
729 mp,
730 rx_ring->mr_gen_num);
731 }
732 #else
733 {
734 mac_rx(qede->mac_handle, NULL, mp);
735 }
736 #endif
737 else if (!mp && (work_done == 0)) {
738 qede->intrSbNoChangeCnt[vect_info->vect_index]++;
739 }
740
741
742 mutex_enter(&fp->qede->drv_lock);
743 /*
	 * The mac layer may have disabled interrupts
	 * in the context of the mac_rx_ring call
	 * above while readying for poll processing.
747 * In this case we do not want to
748 * enable them here.
749 */
750 if (fp->disabled_by_poll == 0) {
751 qede_enable_hw_intr(fp);
752 }
753 mutex_exit(&fp->qede->drv_lock);
754
755 mutex_exit(&fp->fp_lock);
756
757 return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
758 }
759
760 static int
qede_disable_intr(qede_t *qede, uint32_t index)
762 {
763 int status;
764 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
765
766 status = ddi_intr_disable(intr_ctx->intr_hdl_array[index]);
767 if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_disable with %s"
769 " for index %d\n",
770 __func__, qede_get_ddi_fail(status), index);
771 return (status);
772 }
773 atomic_and_32(&intr_ctx->intr_state, ~(1 << index));
774
775 return (status);
776 }
777
778 static int
qede_enable_intr(qede_t *qede, int index)
780 {
781 int status = 0;
782
783 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
784
785 status = ddi_intr_enable(intr_ctx->intr_hdl_array[index]);
786
787 if (status != DDI_SUCCESS) {
788 cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_enable with %s"
789 " for index %d\n",
790 __func__, qede_get_ddi_fail(status), index);
791 return (status);
792 }
793
794 atomic_or_32(&intr_ctx->intr_state, (1 << index));
795
796 return (status);
797 }
798
799 static int
qede_disable_all_fastpath_intrs(qede_t *qede)
801 {
802 int i, status;
803
804 for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
805 status = qede_disable_intr(qede, i);
806 if (status != DDI_SUCCESS) {
807 return (status);
808 }
809 }
810 return (DDI_SUCCESS);
811 }
812
813 static int
qede_enable_all_fastpath_intrs(qede_t *qede)
815 {
816 int status = 0, i;
817
818 for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
819 status = qede_enable_intr(qede, i);
820 if (status != DDI_SUCCESS) {
821 return (status);
822 }
823 }
824 return (DDI_SUCCESS);
825 }
826
827 static int
qede_disable_slowpath_intrs(qede_t *qede)
829 {
830 int i, status;
831
832 for (i = 0; i < qede->num_hwfns; i++) {
833 status = qede_disable_intr(qede, i);
834 if (status != DDI_SUCCESS) {
835 return (status);
836 }
837 }
838 return (DDI_SUCCESS);
839 }
840
841 static int
qede_enable_slowpath_intrs(qede_t *qede)
843 {
844 int i, status;
845
846 for (i = 0; i < qede->num_hwfns; i++) {
847 status = qede_enable_intr(qede, i);
848 if (status != DDI_SUCCESS) {
849 return (status);
850 }
851 }
852 return (DDI_SUCCESS);
853 }
854
855 static int
qede_prepare_edev(qede_t *qede)
857 {
858 struct ecore_dev *edev = &qede->edev;
859 struct ecore_hw_prepare_params p_params;
860
861 /*
	 * Set up the BAR0 and BAR2 base addresses
	 * in the ecore device
864 */
865 edev->regview = (void *)qede->regview;
866 edev->doorbells = (void *)qede->doorbell;
867
868 /* LINTED E_FUNC_RET_MAYBE_IGNORED2 */
869 strcpy(edev->name, qede->name);
870 ecore_init_struct(edev);
871
872 p_params.personality = ECORE_PCI_ETH;
873 p_params.drv_resc_alloc = 0;
874 p_params.chk_reg_fifo = 1;
875 p_params.initiate_pf_flr = 1;
876 //p_params->epoch = time(&epoch);
877 p_params.allow_mdump = 1;
878 p_params.b_relaxed_probe = 0;
879 return (ecore_hw_prepare(edev, &p_params));
880 }
881
882 static int
qede_config_edev(qede_t *qede)
884 {
885 int status, i;
886 struct ecore_dev *edev = &qede->edev;
887 struct ecore_pf_params *params;
888
889 for (i = 0; i < qede->num_hwfns; i++) {
890 struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
891 params = &p_hwfn->pf_params;
892 memset((void *)params, 0, sizeof (struct ecore_pf_params));
893 params->eth_pf_params.num_cons = 32;
894 }
895 status = ecore_resc_alloc(edev);
896 if (status != ECORE_SUCCESS) {
897 cmn_err(CE_NOTE, "%s: Could not allocate ecore resources\n",
898 __func__);
899 return (DDI_ENOMEM);
900 }
901 ecore_resc_setup(edev);
902 return (DDI_SUCCESS);
903 }
904
905 static void
qede_unconfig_intrs(qede_t *qede)
907 {
908 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
909 qede_vector_info_t *vect_info;
910 int i, status = 0;
911
912 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
913 vect_info = &intr_ctx->intr_vect_info[i];
914 if (intr_ctx->intr_vect_info[i].handler_added == B_TRUE) {
915 status = ddi_intr_remove_handler(
916 intr_ctx->intr_hdl_array[i]);
917 if (status != DDI_SUCCESS) {
918 cmn_err(CE_WARN, "qede:%s: Failed"
919 " ddi_intr_remove_handler with %s"
920 " for index %d\n",
921 __func__, qede_get_ddi_fail(
922 status), i);
923 }
924
925 (void) ddi_intr_free(intr_ctx->intr_hdl_array[i]);
926
927 vect_info->handler_added = B_FALSE;
928 intr_ctx->intr_hdl_array[i] = NULL;
929 }
930 }
931 }
932
933 static int
qede_config_intrs(qede_t *qede)
935 {
936 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
937 qede_vector_info_t *vect_info;
938 struct ecore_dev *edev = &qede->edev;
939 int i, status = DDI_FAILURE;
940 ddi_intr_handler_t *handler;
941 void *arg1, *arg2;
942
943 /*
	 * Set up the interrupt handler argument
	 * for each slowpath and fastpath vector
946 */
947 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
948 vect_info = &intr_ctx->intr_vect_info[i];
949 /* Store the table index */
950 vect_info->vect_index = i;
951 vect_info->qede = qede;
952 /*
953 * Store the interrupt handler's argument.
		 * This will be a pointer to ecore_dev->hwfns
955 * for slowpath, a pointer to the fastpath
956 * structure for fastpath.
957 */
958 if (i < qede->num_hwfns) {
959 vect_info->fp = (void *)&edev->hwfns[i];
960 handler = qede_sp_handler;
961 arg1 = (caddr_t)&qede->edev.hwfns[i];
962 arg2 = (caddr_t)vect_info;
963 } else {
964 /*
965 * loop index includes hwfns
			 * so they need to be subtracted
967 * for fp_array
968 */
969 vect_info->fp =
970 (void *)&qede->fp_array[i - qede->num_hwfns];
971 handler = qede_fp_handler;
972 arg1 = (caddr_t)vect_info;
973 arg2 = (caddr_t)qede;
974 }
975
976 status = ddi_intr_add_handler(
977 intr_ctx->intr_hdl_array[i],
978 handler,
979 arg1,
980 arg2);
981 if (status != DDI_SUCCESS) {
982 cmn_err(CE_WARN, "qede:%s: Failed "
983 " ddi_intr_add_handler with %s"
984 " for index %d\n",
985 __func__, qede_get_ddi_fail(
986 status), i);
987 qede_unconfig_intrs(qede);
988 return (DDI_FAILURE);
989 }
990 vect_info->handler_added = B_TRUE;
991 }
992
993 return (status);
994 }
995
996 static void
qede_free_intrs(qede_t *qede)
998 {
999 qede_intr_context_t *intr_ctx;
1000 int i, status;
1001
1002 ASSERT(qede != NULL);
1003 intr_ctx = &qede->intr_ctx;
1004 ASSERT(intr_ctx != NULL);
1005
1006 if (intr_ctx->intr_hdl_array) {
1007 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
1008 if (intr_ctx->intr_hdl_array[i]) {
1009 status =
1010 ddi_intr_free(intr_ctx->intr_hdl_array[i]);
1011 if (status != DDI_SUCCESS) {
1012 cmn_err(CE_NOTE,
1013 "qede:%s: Failed ddi_intr_free"
1014 " with %s\n",
1015 __func__,
1016 qede_get_ddi_fail(status));
1017 }
1018 }
1019 }
1020 intr_ctx->intr_hdl_array = NULL;
1021 }
1022
1023 if (intr_ctx->intr_hdl_array) {
1024 kmem_free(intr_ctx->intr_hdl_array,
1025 intr_ctx->intr_hdl_array_size);
1026 intr_ctx->intr_hdl_array = NULL;
1027 }
1028
1029 if (intr_ctx->intr_vect_info) {
1030 kmem_free(intr_ctx->intr_vect_info,
1031 intr_ctx->intr_vect_info_array_size);
1032 intr_ctx->intr_vect_info = NULL;
1033 }
1034 }
1035
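/*
 * Allocate MSI-X vectors: one per hw-function for the slowpath plus
 * one per fastpath. Falls back to fewer fastpaths when not enough
 * vectors are available.
 */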
1036 static int
qede_alloc_intrs(qede_t *qede)
1038 {
1039 int status, type_supported, num_supported;
1040 int actual, num_available, num_to_request;
1041 dev_info_t *dip;
1042 qede_intr_context_t *intr_ctx = &qede->intr_ctx;
1043
1044 dip = qede->dip;
1045
1046 status = ddi_intr_get_supported_types(dip, &type_supported);
1047 if (status != DDI_SUCCESS) {
1048 cmn_err(CE_WARN,
1049 "qede:%s: Failed ddi_intr_get_supported_types with %s\n",
1050 __func__, qede_get_ddi_fail(status));
1051 return (status);
1052 }
1053 intr_ctx->intr_types_available = type_supported;
1054
1055 if (type_supported & DDI_INTR_TYPE_MSIX) {
1056 intr_ctx->intr_type_in_use = DDI_INTR_TYPE_MSIX;
1057
1058 /*
1059 * get the total number of vectors
1060 * supported by the device
1061 */
1062 status = ddi_intr_get_nintrs(qede->dip,
1063 DDI_INTR_TYPE_MSIX, &num_supported);
1064 if (status != DDI_SUCCESS) {
1065 cmn_err(CE_WARN,
1066 "qede:%s: Failed ddi_intr_get_nintrs with %s\n",
1067 __func__, qede_get_ddi_fail(status));
1068 return (status);
1069 }
1070 intr_ctx->intr_vect_supported = num_supported;
1071
1072 /*
1073 * get the total number of vectors
1074 * available for this instance
1075 */
1076 status = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX,
1077 &num_available);
1078 if (status != DDI_SUCCESS) {
1079 cmn_err(CE_WARN,
1080 "qede:%s: Failed ddi_intr_get_navail with %s\n",
1081 __func__, qede_get_ddi_fail(status));
1082 return (status);
1083 }
1084
1085 if ((num_available < intr_ctx->intr_vect_to_request) &&
1086 (num_available >= 2)) {
1087 qede->num_fp = num_available - qede->num_hwfns;
1088 cmn_err(CE_NOTE,
			    "qede:%s: only %d interrupt vectors available,"
			    " %d were requested\n",
1091 __func__, num_available,
1092 intr_ctx->intr_vect_to_request);
1093 intr_ctx->intr_vect_to_request = num_available;
		} else if (num_available < 2) {
			cmn_err(CE_WARN,
			    "qede:%s: insufficient interrupt vectors"
			    " available: %d\n",
			    __func__, num_available);
1098 return (DDI_FAILURE);
1099 }
1100
1101 intr_ctx->intr_vect_available = num_available;
1102 num_to_request = intr_ctx->intr_vect_to_request;
1103 intr_ctx->intr_hdl_array_size = num_to_request *
1104 sizeof (ddi_intr_handle_t);
1105 intr_ctx->intr_vect_info_array_size = num_to_request *
1106 sizeof (qede_vector_info_t);
1107
		/* Allocate arrays sized for the number of vectors requested */
1109 intr_ctx->intr_hdl_array = kmem_zalloc(
1110 intr_ctx->intr_hdl_array_size, KM_SLEEP);
1111
1112 intr_ctx->intr_vect_info = kmem_zalloc(
1113 intr_ctx->intr_vect_info_array_size, KM_SLEEP);
1114
1115 /*
1116 * Use strict allocation. It will fail if we do not get
		 * exactly what we want. Later we could fall back by
		 * powers of two like this:
1119 * for (i = intr_ctx->intr_requested; i > 0; i >>= 1)
1120 * (Though we would need to account for the slowpath vector)
1121 */
1122 status = ddi_intr_alloc(qede->dip,
1123 intr_ctx->intr_hdl_array,
1124 DDI_INTR_TYPE_MSIX,
1125 0,
1126 num_to_request,
1127 &actual,
1128 DDI_INTR_ALLOC_STRICT);
1129 if (status != DDI_SUCCESS) {
1130 cmn_err(CE_WARN,
1131 "qede:%s: Failed to allocate"
1132 " %d interrupts with %s\n",
1133 __func__, num_to_request,
1134 qede_get_ddi_fail(status));
1135 cmn_err(CE_WARN,
1136 "qede:%s: Only %d interrupts available.\n",
1137 __func__, actual);
1138 goto err_exit;
1139 }
1140 intr_ctx->intr_vect_allocated = num_to_request;
1141
1142 status = ddi_intr_get_pri(intr_ctx->intr_hdl_array[0],
1143 &intr_ctx->intr_pri);
1144 if (status != DDI_SUCCESS) {
1145 cmn_err(CE_WARN,
1146 "qede:%s: Failed ddi_intr_get_pri with %s\n",
1147 __func__, qede_get_ddi_fail(status));
1148 goto err_exit;
1149 }
1150
1151 status = ddi_intr_get_cap(intr_ctx->intr_hdl_array[0],
1152 &intr_ctx->intr_cap);
1153 if (status != DDI_SUCCESS) {
1154 cmn_err(CE_WARN,
1155 "qede:%s: Failed ddi_intr_get_cap with %s\n",
1156 __func__, qede_get_ddi_fail(status));
1157 goto err_exit;
1158 }
1159
1160 } else {
1161 /* For now we only support type MSIX */
1162 cmn_err(CE_WARN,
		    "qede:%s: MSI-X interrupts not supported by device\n",
1164 __func__);
1165 return (DDI_FAILURE);
1166 }
1167
1168 intr_ctx->intr_mode = ECORE_INT_MODE_MSIX;
1169 return (status);
1170 err_exit:
1171 qede_free_intrs(qede);
1172 return (status);
1173 }
1174
1175 static void
1176 /* LINTED E_FUNC_ARG_UNUSED */
qede_unconfig_fm(qede_t *qede)
1178 {
1179 }
1180
1181 /* LINTED E_FUNC_ARG_UNUSED */
1182 static int
qede_fm_err_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
1185 {
1186 pci_ereport_post(dip, err, NULL);
1187 return (err->fme_status);
1188 }
1189
1190
1191 static int
qede_config_fm(qede_t *qede)
1193 {
1194 ddi_iblock_cookie_t iblk;
1195
1196 cmn_err(CE_NOTE, "Entered qede_config_fm\n");
1197 qede_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1198 qede_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1199 qede_buf_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1200 qede_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1201 qede_gen_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1202 qede_tx_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1203 qede_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1204 qede_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1205 qede_dma_attr_rxbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1206 qede_dma_attr_cmddesc.dma_attr_flags = DDI_DMA_FLAGERR;
1207 qede_gen_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1208 qede_buf2k_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1209
1210 ddi_fm_init(qede->dip, &qede->fm_cap, &iblk);
1211
1212 if (DDI_FM_EREPORT_CAP(qede->fm_cap) ||
1213 DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1214 pci_ereport_setup(qede->dip);
1215 }
1216
1217 if (DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1218 ddi_fm_handler_register(qede->dip,
1219 qede_fm_err_cb, (void *)qede);
1220 }
1221 return (DDI_SUCCESS);
1222
1223 }
1224
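/*
 * Allocate and bind a physically contiguous (single cookie) DMA
 * memory area and return its handles, virtual address and cookie.
 */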
1225 int
qede_dma_mem_alloc(qede_t *qede,
    int size, uint_t dma_flags, caddr_t *address, ddi_dma_cookie_t *cookie,
    ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep,
    ddi_dma_attr_t *dma_attr, ddi_device_acc_attr_t *dev_acc_attr)
1230 {
1231 int err;
1232 uint32_t ncookies;
1233 size_t ring_len;
1234
1235 *dma_handle = NULL;
1236
1237 if (size <= 0) {
1238 return (DDI_ENOMEM);
1239 }
1240
1241 err = ddi_dma_alloc_handle(qede->dip,
1242 dma_attr,
1243 DDI_DMA_DONTWAIT, NULL, dma_handle);
1244 if (err != DDI_SUCCESS) {
1245 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1246 "ddi_dma_alloc_handle FAILED: %d", qede->instance, err);
1247 *dma_handle = NULL;
1248 return (DDI_ENOMEM);
1249 }
1250
1251 err = ddi_dma_mem_alloc(*dma_handle,
1252 size, dev_acc_attr,
1253 dma_flags,
1254 DDI_DMA_DONTWAIT, NULL, address, &ring_len,
1255 handlep);
1256 if (err != DDI_SUCCESS) {
1257 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1258 "ddi_dma_mem_alloc FAILED: %d, request size: %d",
1259 qede->instance, err, size);
1260 ddi_dma_free_handle(dma_handle);
1261 *dma_handle = NULL;
1262 *handlep = NULL;
1263 return (DDI_ENOMEM);
1264 }
1265
1266 if (ring_len < size) {
1267 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1268 "could not allocate required: %d, request size: %d",
1269 qede->instance, err, size);
1270 ddi_dma_mem_free(handlep);
1271 ddi_dma_free_handle(dma_handle);
1272 *dma_handle = NULL;
1273 *handlep = NULL;
1274 return (DDI_FAILURE);
1275 }
1276
1277 (void) memset(*address, 0, size);
1278
1279 if (((err = ddi_dma_addr_bind_handle(*dma_handle,
1280 NULL, *address, ring_len,
1281 dma_flags,
1282 DDI_DMA_DONTWAIT, NULL,
1283 cookie, &ncookies)) != DDI_DMA_MAPPED) ||
1284 (ncookies != 1)) {
1285 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1286 "ddi_dma_addr_bind_handle Failed: %d",
1287 qede->instance, err);
1288 ddi_dma_mem_free(handlep);
1289 ddi_dma_free_handle(dma_handle);
1290 *dma_handle = NULL;
1291 *handlep = NULL;
1292 return (DDI_FAILURE);
1293 }
1294
1295 return (DDI_SUCCESS);
1296 }
1297
1298 void
qede_pci_free_consistent(ddi_dma_handle_t *dma_handle,
    ddi_acc_handle_t *acc_handle)
1301 {
1302 int err;
1303
1304 if (*dma_handle != NULL) {
1305 err = ddi_dma_unbind_handle(*dma_handle);
1306 if (err != DDI_SUCCESS) {
1307 cmn_err(CE_WARN, "!pci_free_consistent: "
1308 "Error unbinding memory, err %d", err);
1309 return;
1310 }
1311 } else {
1312 goto exit;
1313 }
1314 ddi_dma_mem_free(acc_handle);
1315 ddi_dma_free_handle(dma_handle);
1316 exit:
1317 *dma_handle = NULL;
1318 *acc_handle = NULL;
1319 }
1320
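/*
 * Stop every vport (one per hw-function) that was previously started.
 */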
1321 static int
qede_vport_stop(qede_t *qede)
1323 {
1324 struct ecore_dev *edev = &qede->edev;
1325 struct ecore_hwfn *p_hwfn;
1326 int i, status = ECORE_BUSY;
1327
1328 for (i = 0; i < edev->num_hwfns; i++) {
1329 p_hwfn = &edev->hwfns[i];
1330
1331 if (qede->vport_state[i] !=
1332 QEDE_VPORT_STARTED) {
1333 qede_info(qede, "vport %d not started", i);
1334 continue;
1335 }
1336
1337 status = ecore_sp_vport_stop(p_hwfn,
1338 p_hwfn->hw_info.opaque_fid,
1339 i); /* vport needs fix */
1340 if (status != ECORE_SUCCESS) {
1341 cmn_err(CE_WARN, "!qede_vport_stop: "
1342 "FAILED for hwfn%d ", i);
1343 return (DDI_FAILURE);
1344 }
1345 cmn_err(CE_WARN, "!qede_vport_stop: "
1346 "SUCCESS for hwfn%d ", i);
1347
1348 qede->vport_state[i] =
1349 QEDE_VPORT_STOPPED;
1350 }
1351
1352 return (status);
1353 }
1354
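/*
 * Build the RSS parameters for the given hw-function. Returns non-zero
 * when RSS should be enabled, i.e. when more than one fastpath is in use.
 */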
1355 static uint8_t
qede_get_active_rss_params(qede_t *qede, u8 hwfn_id)
1357 {
1358 struct ecore_rss_params rss_params;
1359 qede_fastpath_t *fp;
1360 int i;
1361 const uint64_t hash_key[] =
1362 {
1363 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
1364 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1365 0x255b0ec26d5a56daULL
1366 };
1367 uint8_t enable_rss = 0;
1368
1369 bzero(&rss_params, sizeof (rss_params));
1370 if (qede->num_fp > 1) {
1371 qede_info(qede, "Configuring RSS parameters");
1372 enable_rss = 1;
1373 } else {
1374 qede_info(qede, "RSS configuration not needed");
1375 enable_rss = 0;
1376 goto exit;
1377 }
1378
1379 rss_params.update_rss_config = 1;
1380 rss_params.rss_enable = 1;
1381 rss_params.update_rss_capabilities = 1;
1382 rss_params.update_rss_ind_table = 1;
1383 rss_params.update_rss_key = 1;
1384
1385 rss_params.rss_caps = ECORE_RSS_IPV4 |
1386 ECORE_RSS_IPV6 |
1387 ECORE_RSS_IPV4_TCP |
1388 ECORE_RSS_IPV6_TCP |
1389 ECORE_RSS_IPV4_UDP |
1390 ECORE_RSS_IPV6_UDP;
1391
1392 rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1393
1394 bcopy(&hash_key[0], &rss_params.rss_key[0],
1395 sizeof (rss_params.rss_key));
1396
1397 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1398 fp = &qede->fp_array[i % qede->num_fp];
1399 rss_params.rss_ind_table[i] = (void *)(fp->rx_ring->p_cid);
1400 }
1401 exit:
1402 bcopy(&rss_params, &qede->rss_params[hwfn_id], sizeof (rss_params));
1403 return (enable_rss);
1404 }
1405
1406 static int
qede_vport_update(qede_t *qede,
    enum qede_vport_state state)
1409 {
1410 struct ecore_dev *edev = &qede->edev;
1411 struct ecore_hwfn *p_hwfn;
1412 struct ecore_sp_vport_update_params *vport_params;
1413 struct ecore_sge_tpa_params tpa_params;
1414 int status = DDI_SUCCESS;
1415 bool new_state;
1416 uint8_t i;
1417
1418 cmn_err(CE_NOTE, "qede_vport_update: "
1419 "Enter, state = %s%s%s%s%s",
1420 state == QEDE_VPORT_STARTED ? "QEDE_VPORT_STARTED" : "",
1421 state == QEDE_VPORT_ON ? "QEDE_VPORT_ON" : "",
1422 state == QEDE_VPORT_OFF ? "QEDE_VPORT_OFF" : "",
1423 state == QEDE_VPORT_STOPPED ? "QEDE_VPORT_STOPPED" : "",
1424 state == QEDE_VPORT_UNKNOWN ? "" : "");
1425
1426 /*
1427 * Update only does on and off.
1428 * For now we combine TX and RX
1429 * together. Later we can split them
1430 * and set other params as well.
1431 */
1432 if (state == QEDE_VPORT_ON) {
1433 new_state = B_TRUE;
1434 } else if (state == QEDE_VPORT_OFF) {
1435 new_state = B_FALSE;
1436 } else {
1437 cmn_err(CE_WARN, "qede_vport_update: "
1438 "invalid, state = %d", state);
1439 return (DDI_EINVAL);
1440 }
1441
1442 for (i = 0; i < edev->num_hwfns; i++) {
1443 p_hwfn = &edev->hwfns[i];
1444 vport_params = &qede->vport_params[i];
1445
1446 vport_params->opaque_fid =
1447 p_hwfn->hw_info.opaque_fid;
1448 vport_params->vport_id =
1449 i;
1450
1451 vport_params->update_vport_active_rx_flg =
1452 1;
1453 if (new_state == B_TRUE)
1454 vport_params->vport_active_rx_flg = 1;
1455 else
1456 vport_params->vport_active_rx_flg = 0;
1457
1458 vport_params->update_vport_active_tx_flg =
1459 1;
1460 if (new_state == B_TRUE)
1461 vport_params->vport_active_tx_flg = 1;
1462 else
1463 vport_params->vport_active_tx_flg = 0;
1464
1465 vport_params->update_inner_vlan_removal_flg =
1466 0;
1467 vport_params->inner_vlan_removal_flg =
1468 0;
1469 vport_params->update_default_vlan_enable_flg =
1470 0;
1471 vport_params->default_vlan_enable_flg =
1472 0;
1473 vport_params->update_default_vlan_flg =
1474 1;
1475 vport_params->default_vlan =
1476 0;
1477 vport_params->update_tx_switching_flg =
1478 0;
1479 vport_params->tx_switching_flg =
1480 0;
1481 vport_params->update_approx_mcast_flg =
1482 0;
1483 vport_params->update_anti_spoofing_en_flg =
1484 0;
1485 vport_params->anti_spoofing_en = 0;
1486 vport_params->update_accept_any_vlan_flg =
1487 1;
1488 vport_params->accept_any_vlan = 1;
1489
1490 vport_params->accept_flags.update_rx_mode_config = 1;
1491 vport_params->accept_flags.update_tx_mode_config = 1;
1492 vport_params->accept_flags.rx_accept_filter =
1493 ECORE_ACCEPT_BCAST |
1494 ECORE_ACCEPT_UCAST_UNMATCHED |
1495 ECORE_ACCEPT_MCAST_UNMATCHED;
1496 vport_params->accept_flags.tx_accept_filter =
1497 ECORE_ACCEPT_BCAST |
1498 ECORE_ACCEPT_UCAST_UNMATCHED |
1499 ECORE_ACCEPT_MCAST_UNMATCHED;
1500
1501 vport_params->sge_tpa_params = NULL;
1502
1503 if (qede->lro_enable &&
1504 (new_state == B_TRUE)) {
1505 qede_print("!%s(%d): enabling LRO ",
1506 __func__, qede->instance);
1507
1508 memset(&tpa_params, 0,
1509 sizeof (struct ecore_sge_tpa_params));
1510 tpa_params.max_buffers_per_cqe = 5;
1511 tpa_params.update_tpa_en_flg = 1;
1512 tpa_params.tpa_ipv4_en_flg = 1;
1513 tpa_params.tpa_ipv6_en_flg = 1;
1514 tpa_params.tpa_ipv4_tunn_en_flg = 0;
1515 tpa_params.tpa_ipv6_tunn_en_flg = 0;
1516 tpa_params.update_tpa_param_flg = 1;
1517 tpa_params.tpa_pkt_split_flg = 0;
1518 tpa_params.tpa_hdr_data_split_flg = 0;
1519 tpa_params.tpa_gro_consistent_flg = 0;
1520 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
1521 tpa_params.tpa_max_size = 65535;
1522 tpa_params.tpa_min_size_to_start = qede->mtu/2;
1523 tpa_params.tpa_min_size_to_cont = qede->mtu/2;
1524 vport_params->sge_tpa_params = &tpa_params;
1525 }
1526
1527 /*
1528 * Get the rss_params to be configured
1529 */
1530 if (qede_get_active_rss_params(qede, i /* hwfn id */)) {
1531 vport_params->rss_params = &qede->rss_params[i];
1532 } else {
1533 vport_params->rss_params = NULL;
1534 }
1535
1536 status = ecore_sp_vport_update(p_hwfn,
1537 vport_params,
1538 ECORE_SPQ_MODE_EBLOCK,
1539 NULL);
1540
1541 if (status != ECORE_SUCCESS) {
1542 cmn_err(CE_WARN, "ecore_sp_vport_update: "
1543 "FAILED for hwfn%d "
1544 " with ", i);
1545 return (DDI_FAILURE);
1546 }
1547 cmn_err(CE_NOTE, "!ecore_sp_vport_update: "
1548 "SUCCESS for hwfn%d ", i);
1549
1550
1551 }
1552 return (DDI_SUCCESS);
1553 }
1554
1555
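/*
 * Start one vport per hw-function and reset the vport statistics.
 */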
1556 static int
qede_vport_start(qede_t *qede)
1558 {
1559 struct ecore_dev *edev = &qede->edev;
1560 struct ecore_hwfn *p_hwfn;
1561 struct ecore_sp_vport_start_params params;
1562 uint8_t i;
1563 int status = ECORE_BUSY;
1564
1565 for (i = 0; i < edev->num_hwfns; i++) {
1566 p_hwfn = &edev->hwfns[i];
1567 if ((qede->vport_state[i] !=
1568 QEDE_VPORT_UNKNOWN) &&
1569 (qede->vport_state[i] !=
1570 QEDE_VPORT_STOPPED)) {
1571 continue;
1572 }
1573
1574 params.tpa_mode = ECORE_TPA_MODE_NONE;
1575 params.remove_inner_vlan = 0;
1576 params.tx_switching = 0;
1577 params.handle_ptp_pkts = 0;
1578 params.only_untagged = 0;
1579 params.drop_ttl0 = 1;
1580 params.max_buffers_per_cqe = 16;
1581 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
1582 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1583 params.vport_id = i;
1584 params.mtu = qede->mtu;
1585 status = ecore_sp_vport_start(p_hwfn, ¶ms);
1586 if (status != ECORE_SUCCESS) {
1587 cmn_err(CE_WARN, "qede_vport_start: "
1588 "FAILED for hwfn%d", i);
1589 return (DDI_FAILURE);
1590 }
1591 cmn_err(CE_NOTE, "!ecore_sp_vport_start: "
1592 "SUCCESS for hwfn%d ", i);
1593
1594 ecore_hw_start_fastpath(p_hwfn);
1595 qede->vport_state[i] = QEDE_VPORT_STARTED;
1596 }
1597 ecore_reset_vport_stats(edev);
1598 return (status);
1599 }
1600
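/*
 * Post the current rx BD and CQE producer indices to the hardware.
 */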
1601 void
qede_update_rx_q_producer(qede_rx_ring_t *rx_ring)
1603 {
1604 u16 bd_prod = ecore_chain_get_prod_idx(&rx_ring->rx_bd_ring);
1605 u16 cqe_prod = ecore_chain_get_prod_idx(&rx_ring->rx_cqe_ring);
1606 /* LINTED E_FUNC_SET_NOT_USED */
1607 struct eth_rx_prod_data rx_prod_cmd = { 0 };
1608
1609
1610 rx_prod_cmd.bd_prod = HOST_TO_LE_32(bd_prod);
1611 rx_prod_cmd.cqe_prod = HOST_TO_LE_32(cqe_prod);
1612 UPDATE_RX_PROD(rx_ring, rx_prod_cmd);
1613 }
1614
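/*
 * Quiesce the data path: turn the vports off, stop all rx/tx queues
 * and then stop the vports themselves.
 */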
1615 static int
qede_fastpath_stop_queues(qede_t *qede)
1617 {
1618 int i, j;
1619 int status = DDI_FAILURE;
1620 struct ecore_dev *edev;
1621 struct ecore_hwfn *p_hwfn;
1622 struct ecore_queue_cid *p_tx_cid, *p_rx_cid;
1623
1624 qede_fastpath_t *fp;
1625 qede_rx_ring_t *rx_ring;
1626 qede_tx_ring_t *tx_ring;
1627
1628 ASSERT(qede != NULL);
1629 /* ASSERT(qede->edev != NULL); */
1630
1631 edev = &qede->edev;
1632
1633 status = qede_vport_update(qede, QEDE_VPORT_OFF);
1634 if (status != DDI_SUCCESS) {
1635 cmn_err(CE_WARN, "FAILED to "
1636 "update vports");
1637 return (DDI_FAILURE);
1638 }
1639
1640 for (i = 0; i < qede->num_fp; i++) {
1641 fp = &qede->fp_array[i];
1642 rx_ring = fp->rx_ring;
1643 p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1644 for (j = 0; j < qede->num_tc; j++) {
1645 tx_ring = fp->tx_ring[j];
1646 if (tx_ring->queue_started == B_TRUE) {
1647 cmn_err(CE_WARN, "Stopping tx queue "
1648 "%d:%d. ", i, j);
1649 p_tx_cid = tx_ring->p_cid;
1650 status = ecore_eth_tx_queue_stop(p_hwfn,
1651 (void *)p_tx_cid);
1652 if (status != ECORE_SUCCESS) {
1653 cmn_err(CE_WARN, "FAILED to "
1654 "stop tx queue %d:%d", i, j);
1655 return (DDI_FAILURE);
1656 }
1657 tx_ring->queue_started = B_FALSE;
1658 cmn_err(CE_NOTE, "tx_ring %d:%d stopped\n", i,
1659 j);
1660 }
1661 }
1662
1663 if (rx_ring->queue_started == B_TRUE) {
1664 cmn_err(CE_WARN, "Stopping rx queue "
1665 "%d. ", i);
1666 p_rx_cid = rx_ring->p_cid;
1667 status = ecore_eth_rx_queue_stop(p_hwfn,
1668 (void *)p_rx_cid, B_TRUE, B_FALSE);
1669 if (status != ECORE_SUCCESS) {
1670 cmn_err(CE_WARN, "FAILED to "
1671 "stop rx queue %d "
1672 "with ecore status %s",
1673 i, qede_get_ecore_fail(status));
1674 return (DDI_FAILURE);
1675 }
1676 rx_ring->queue_started = B_FALSE;
1677 cmn_err(CE_NOTE, "rx_ring%d stopped\n", i);
1678 }
1679 }
1680
1681 status = qede_vport_stop(qede);
1682 if (status != DDI_SUCCESS) {
1683 cmn_err(CE_WARN, "qede_vport_stop "
1684 "FAILED to stop vports");
1685 return (DDI_FAILURE);
1686 }
1687
1688 ecore_hw_stop_fastpath(edev);
1689
1690 return (DDI_SUCCESS);
1691 }
1692
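/*
 * Bring up the data path: start the vports, start every rx and tx
 * queue, then turn the vports on.
 */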
1693 static int
qede_fastpath_start_queues(qede_t *qede)
1695 {
1696 int i, j;
1697 int status = DDI_FAILURE;
1698 struct ecore_dev *edev;
1699 struct ecore_hwfn *p_hwfn;
1700 struct ecore_queue_start_common_params params;
1701 struct ecore_txq_start_ret_params tx_ret_params;
1702 struct ecore_rxq_start_ret_params rx_ret_params;
1703 qede_fastpath_t *fp;
1704 qede_rx_ring_t *rx_ring;
1705 qede_tx_ring_t *tx_ring;
1706 dma_addr_t p_phys_table;
1707 u16 page_cnt;
1708
1709 ASSERT(qede != NULL);
1710 /* ASSERT(qede->edev != NULL); */
1711 edev = &qede->edev;
1712
1713 status = qede_vport_start(qede);
1714 if (status != DDI_SUCCESS) {
1715 cmn_err(CE_WARN, "Failed to "
1716 "start vports");
1717 return (DDI_FAILURE);
1718 }
1719
1720 for (i = 0; i < qede->num_fp; i++) {
1721 fp = &qede->fp_array[i];
1722 rx_ring = fp->rx_ring;
1723 p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1724
1725 params.vport_id = fp->vport_id;
1726 params.queue_id = fp->rx_queue_index;
1727 params.stats_id = fp->stats_id;
1728 params.p_sb = fp->sb_info;
1729 params.sb_idx = RX_PI;
1730 p_phys_table = ecore_chain_get_pbl_phys(&rx_ring->rx_cqe_ring);
1731 page_cnt = ecore_chain_get_page_cnt(&rx_ring->rx_cqe_ring);
1732
1733 status = ecore_eth_rx_queue_start(p_hwfn,
1734 p_hwfn->hw_info.opaque_fid,
1735 ¶ms,
1736 qede->rx_buf_size,
1737 rx_ring->rx_bd_ring.p_phys_addr,
1738 p_phys_table,
1739 page_cnt,
1740 &rx_ret_params);
1741
1742 rx_ring->hw_rxq_prod_addr = rx_ret_params.p_prod;
1743 rx_ring->p_cid = rx_ret_params.p_handle;
1744 if (status != DDI_SUCCESS) {
1745 cmn_err(CE_WARN, "ecore_sp_eth_rx_queue_start "
1746 "FAILED for rxq%d", i);
1747 return (DDI_FAILURE);
1748 }
1749 rx_ring->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
1750
1751 OSAL_MSLEEP(20);
1752 *rx_ring->hw_cons_ptr = 0;
1753
1754 qede_update_rx_q_producer(rx_ring);
1755 rx_ring->queue_started = B_TRUE;
1756 cmn_err(CE_NOTE, "rx_ring%d started\n", i);
1757
1758 for (j = 0; j < qede->num_tc; j++) {
1759 tx_ring = fp->tx_ring[j];
1760
1761 params.vport_id = fp->vport_id;
1762 params.queue_id = tx_ring->tx_queue_index;
1763 params.stats_id = fp->stats_id;
1764 params.p_sb = fp->sb_info;
1765 params.sb_idx = TX_PI(j);
1766
1767 p_phys_table = ecore_chain_get_pbl_phys(
1768 &tx_ring->tx_bd_ring);
1769 page_cnt = ecore_chain_get_page_cnt(
1770 &tx_ring->tx_bd_ring);
1771 status = ecore_eth_tx_queue_start(p_hwfn,
1772 p_hwfn->hw_info.opaque_fid,
1773 ¶ms,
1774 0,
1775 p_phys_table,
1776 page_cnt,
1777 &tx_ret_params);
1778 tx_ring->doorbell_addr = tx_ret_params.p_doorbell;
1779 tx_ring->p_cid = tx_ret_params.p_handle;
1780 if (status != DDI_SUCCESS) {
1781 cmn_err(CE_WARN, "ecore_sp_eth_tx_queue_start "
1782 "FAILED for txq%d:%d", i,j);
1783 return (DDI_FAILURE);
1784 }
1785 tx_ring->hw_cons_ptr =
1786 &fp->sb_info->sb_virt->pi_array[TX_PI(j)];
1787 /* LINTED E_CONSTANT_CONDITION */
1788 SET_FIELD(tx_ring->tx_db.data.params,
1789 ETH_DB_DATA_DEST, DB_DEST_XCM);
1790 /* LINTED E_CONSTANT_CONDITION */
1791 SET_FIELD(tx_ring->tx_db.data.params,
1792 ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1793 /* LINTED E_CONSTANT_CONDITION */
1794 SET_FIELD(tx_ring->tx_db.data.params,
1795 ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD);
1796 tx_ring->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1797 tx_ring->queue_started = B_TRUE;
1798 cmn_err(CE_NOTE, "tx_ring %d:%d started\n", i, j);
1799 }
1800 }
1801
1802 status = qede_vport_update(qede, QEDE_VPORT_ON);
1803 if (status != DDI_SUCCESS) {
1804 cmn_err(CE_WARN, "Failed to "
1805 "update vports");
1806 return (DDI_FAILURE);
1807 }
1808 return (status);
1809 }
1810
1811 static void
qede_free_mag_elem(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer,
    struct eth_rx_bd *bd)
1814 {
1815 int i;
1816
1817 if (bd != NULL) {
1818 bzero(bd, sizeof (*bd));
1819 }
1820
1821 if (rx_buffer->mp != NULL) {
1822 freemsg(rx_buffer->mp);
1823 rx_buffer->mp = NULL;
1824 }
1825 }
1826
1827 static void
qede_free_lro_rx_buffers(qede_rx_ring_t *rx_ring)
1829 {
1830 int i, j;
1831 qede_lro_info_t *lro_info;
1832
1833 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1834 lro_info = &rx_ring->lro_info[i];
1835 if (lro_info->agg_state == QEDE_AGG_STATE_NONE) {
1836 continue;
1837 }
1838 for (j = 0; j < QEDE_MAX_BD_PER_AGG; j++) {
1839 if (lro_info->rx_buffer[j] == NULL) {
1840 break;
1841 }
1842 qede_recycle_copied_rx_buffer(
1843 lro_info->rx_buffer[j]);
1844 lro_info->rx_buffer[j] = NULL;
1845 }
1846 lro_info->agg_state = QEDE_AGG_STATE_NONE;
1847 }
1848 }
1849
1850 static void
qede_free_rx_buffers_legacy(qede_t *qede, qede_rx_buf_area_t *rx_buf_area)
1852 {
1853 int i, j;
1854 u32 ref_cnt, bufs_per_page;
1855 qede_rx_buffer_t *rx_buffer, *first_rx_buf_in_page = 0;
1856 qede_rx_ring_t *rx_ring = rx_buf_area->rx_ring;
1857 bool free_rx_buffer;
1858
1859 bufs_per_page = rx_buf_area->bufs_per_page;
1860
1861 rx_buffer = &rx_buf_area->rx_buf_pool[0];
1862
1863 if (rx_buf_area) {
1864 for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
1865 free_rx_buffer = B_TRUE;
1866 for (j = 0; j < bufs_per_page; j++) {
1867 if (!j) {
1868 first_rx_buf_in_page = rx_buffer;
1869 }
1870 if (rx_buffer->ref_cnt != 0) {
1871 ref_cnt = atomic_dec_32_nv(
1872 &rx_buffer->ref_cnt);
1873 if (ref_cnt == 0) {
1874 /*
1875 * Buffer is now
1876 * completely free
1877 */
1878 if (rx_buffer->mp) {
1879 freemsg(rx_buffer->mp);
1880 rx_buffer->mp = NULL;
1881 }
1882 } else {
1883 /*
					 * Since the buffer is still
					 * held by the stack, we can't
					 * free the whole page
1887 */
1888 free_rx_buffer = B_FALSE;
1889 }
1890 }
1891 rx_buffer++;
1892 }
1893
1894 if (free_rx_buffer == B_TRUE) {
1895 qede_pci_free_consistent(
1896 &first_rx_buf_in_page->dma_info.dma_handle,
1897 &first_rx_buf_in_page->dma_info.acc_handle);
1898 }
1899 }
1900
1901 /*
		 * If no more buffers are held by the stack,
		 * then free the buffer pools
1904 */
1905 if (rx_buf_area->buf_upstream == 0) {
1906 mutex_destroy(&rx_buf_area->active_buf_list.lock);
1907 mutex_destroy(&rx_buf_area->passive_buf_list.lock);
1908
1909 kmem_free(rx_buf_area, sizeof (qede_rx_buf_area_t));
1910 rx_buf_area = NULL;
1911 if (atomic_cas_32(&qede->detach_unsafe, 2, 2)) {
1912 atomic_dec_32(&qede->detach_unsafe);
1913 }
1914
1915 }
1916 }
1917 }
1918
1919
1920 static void
qede_free_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
1922 {
1923 qede_free_lro_rx_buffers(rx_ring);
1924 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1925 qede_free_rx_buffers_legacy(qede, rx_buf_area);
1926 }
1927
1928 static void
qede_free_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
1930 {
1931 qede_rx_ring_t *rx_ring;
1932
1933 ASSERT(qede != NULL);
1934 ASSERT(fp != NULL);
1935
1936
1937 rx_ring = fp->rx_ring;
1938 rx_ring->rx_buf_area->inactive = 1;
1939
1940 qede_free_rx_buffers(qede, rx_ring);
1941
1942
1943 if (rx_ring->rx_bd_ring.p_virt_addr) {
1944 ecore_chain_free(&qede->edev, &rx_ring->rx_bd_ring);
1945 rx_ring->rx_bd_ring.p_virt_addr = NULL;
1946 }
1947
1948 if (rx_ring->rx_cqe_ring.p_virt_addr) {
1949 ecore_chain_free(&qede->edev, &rx_ring->rx_cqe_ring);
1950 rx_ring->rx_cqe_ring.p_virt_addr = NULL;
1951 if (rx_ring->rx_cqe_ring.pbl_sp.p_virt_table) {
1952 rx_ring->rx_cqe_ring.pbl_sp.p_virt_table = NULL;
1953 }
1954 }
1955 rx_ring->hw_cons_ptr = NULL;
1956 rx_ring->hw_rxq_prod_addr = NULL;
1957 rx_ring->sw_rx_cons = 0;
1958 rx_ring->sw_rx_prod = 0;
1959
1960 }
1961
1962
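/*
 * Fill the rx BD ring with the DMA addresses of all receive buffers
 * and place the buffers on the active list.
 */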
1963 static int
qede_init_bd(qede_t *qede, qede_rx_ring_t *rx_ring)
1965 {
1966 struct eth_rx_bd *bd = NULL;
1967 int ret = DDI_SUCCESS;
1968 int i;
1969 qede_rx_buffer_t *rx_buffer;
1970 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1971 qede_rx_buf_list_t *active_buf_list = &rx_buf_area->active_buf_list;
1972
1973 for (i = 0; i < rx_ring->rx_buf_count; i++) {
1974 rx_buffer = &rx_buf_area->rx_buf_pool[i];
1975 active_buf_list->buf_list[i] = rx_buffer;
1976 active_buf_list->num_entries++;
1977 bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
1978 if (bd == NULL) {
1979 qede_print_err("!%s(%d): invalid NULL bd in "
1980 "rx_bd_ring", __func__, qede->instance);
1981 ret = DDI_FAILURE;
1982 goto err;
1983 }
1984
1985 bd->addr.lo = HOST_TO_LE_32(U64_LO(
1986 rx_buffer->dma_info.phys_addr));
1987 bd->addr.hi = HOST_TO_LE_32(U64_HI(
1988 rx_buffer->dma_info.phys_addr));
1989
1990 }
1991 active_buf_list->tail = 0;
1992 err:
1993 return (ret);
1994 }
1995
1996
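/*
 * The rx buffer pool is managed through two circular lists: the
 * active list tracks buffers posted to the firmware (or about to be
 * handed to the stack), while the passive list collects buffers
 * returned by the stack until they can be re-posted. Both are simple
 * rings indexed by head/tail counters wrapped with RX_RING_MASK,
 * e.g. head = (head + 1) & RX_RING_MASK.
 */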
1997 qede_rx_buffer_t *
1998 qede_get_from_active_list(qede_rx_ring_t *rx_ring,
1999 uint32_t *num_entries)
2000 {
2001 qede_rx_buffer_t *rx_buffer;
2002 qede_rx_buf_list_t *active_buf_list =
2003 &rx_ring->rx_buf_area->active_buf_list;
2004 u16 head = active_buf_list->head;
2005
2006 rx_buffer = active_buf_list->buf_list[head];
2007 active_buf_list->buf_list[head] = NULL;
2008 head = (head + 1) & RX_RING_MASK;
2009
2010 if (rx_buffer) {
2011 atomic_dec_32(&active_buf_list->num_entries);
2012 atomic_inc_32(&rx_ring->rx_buf_area->buf_upstream);
2013 atomic_inc_32(&rx_buffer->ref_cnt);
2014 rx_buffer->buf_state = RX_BUF_STATE_WITH_OS;
2015
2016 if (rx_buffer->mp == NULL) {
2017 rx_buffer->mp =
2018 desballoc(rx_buffer->dma_info.virt_addr,
2019 rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2020 }
2021 }
2022
2023 *num_entries = active_buf_list->num_entries;
2024 active_buf_list->head = head;
2025
2026 return (rx_buffer);
2027 }
2028
2029 qede_rx_buffer_t *
2030 qede_get_from_passive_list(qede_rx_ring_t *rx_ring)
2031 {
2032 qede_rx_buf_list_t *passive_buf_list =
2033 &rx_ring->rx_buf_area->passive_buf_list;
2034 qede_rx_buffer_t *rx_buffer;
2035 u32 head;
2036
2037 mutex_enter(&passive_buf_list->lock);
2038 head = passive_buf_list->head;
2039 if (passive_buf_list->buf_list[head] == NULL) {
2040 mutex_exit(&passive_buf_list->lock);
2041 return (NULL);
2042 }
2043
2044 rx_buffer = passive_buf_list->buf_list[head];
2045 passive_buf_list->buf_list[head] = NULL;
2046
2047 passive_buf_list->head = (passive_buf_list->head + 1) & RX_RING_MASK;
2048 mutex_exit(&passive_buf_list->lock);
2049
2050 atomic_dec_32(&passive_buf_list->num_entries);
2051
2052 return (rx_buffer);
2053 }
2054
2055 void
2056 qede_put_to_active_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2057 {
2058 qede_rx_buf_list_t *active_buf_list =
2059 &rx_ring->rx_buf_area->active_buf_list;
2060 u16 tail = active_buf_list->tail;
2061
2062 active_buf_list->buf_list[tail] = rx_buffer;
2063 tail = (tail + 1) & RX_RING_MASK;
2064
2065 active_buf_list->tail = tail;
2066 atomic_inc_32(&active_buf_list->num_entries);
2067 }
2068
2069 void
2070 qede_replenish_rx_buffers(qede_rx_ring_t *rx_ring)
2071 {
2072 qede_rx_buffer_t *rx_buffer;
2073 int count = 0;
2074 struct eth_rx_bd *bd;
2075
2076 /*
2077 * Only replenish when we have at least
2078 * 1/4th of the ring to do. We don't want
2079 * to incur many lock contentions and
2080 * cycles for just a few buffers.
2081 * We don't bother with the passive area lock
2082 * here because we're just getting an
2083 * estimate. Also, we only pull from
2084 * the passive list in this function.
2085 */
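/*
 * Note that the threshold described above is enforced by callers
 * rather than here; qede_recycle_rx_buffer(), for instance, only
 * invokes this routine once the passive list holds at least 32
 * entries.
 */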
2086
2087 /*
2088 * Use a replenish lock because we can do the
2089 * replenish operation at the end of
2090 * processing the rx_ring, but also when
2091 * we get buffers back from the upper
2092 * layers.
2093 */
2094 if (mutex_tryenter(&rx_ring->rx_replen_lock) == 0) {
2095 qede_info(rx_ring->qede, "!%s(%d): Failed to take"
2096 " replenish_lock",
2097 __func__, rx_ring->qede->instance);
2098 return;
2099 }
2100
2101 rx_buffer = qede_get_from_passive_list(rx_ring);
2102
2103 while (rx_buffer != NULL) {
2104 bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
2105 if (bd == NULL) {
2106 qede_info(rx_ring->qede, "!%s(%d): bd = null",
2107 __func__, rx_ring->qede->instance);
2108 qede_put_to_passive_list(rx_ring, rx_buffer);
2109 break;
2110 }
2111
2112 bd->addr.lo = HOST_TO_LE_32(U64_LO(
2113 rx_buffer->dma_info.phys_addr));
2114 bd->addr.hi = HOST_TO_LE_32(
2115 U64_HI(rx_buffer->dma_info.phys_addr));
2116
2117 /*
2118 * Put the buffer in active list since it will be
2119 * posted to fw now
2120 */
2121 qede_put_to_active_list(rx_ring, rx_buffer);
2122 rx_buffer->buf_state = RX_BUF_STATE_WITH_FW;
2123 count++;
2124 rx_buffer = qede_get_from_passive_list(rx_ring);
2125 }
2126 mutex_exit(&rx_ring->rx_replen_lock);
2127 }
2128
2129 /*
2130 * Put the rx_buffer to the passive_buf_list
2131 */
2132 int
2133 qede_put_to_passive_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2134 {
2135 qede_rx_buf_list_t *passive_buf_list =
2136 &rx_ring->rx_buf_area->passive_buf_list;
2137 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2138 int tail = 0;
2139
2140 mutex_enter(&passive_buf_list->lock);
2141
2142 tail = passive_buf_list->tail;
2143 passive_buf_list->tail = (passive_buf_list->tail + 1) & RX_RING_MASK;
2144
2145 rx_buf_area->passive_buf_list.buf_list[tail] = rx_buffer;
2146 atomic_inc_32(&passive_buf_list->num_entries);
2147
2148 if (passive_buf_list->num_entries > rx_ring->rx_buf_count) {
2149 /* Sanity check */
2150 qede_info(rx_ring->qede, "ERROR: num_entries (%d)"
2151 " > max count (%d)",
2152 passive_buf_list->num_entries,
2153 rx_ring->rx_buf_count);
2154 }
2155 mutex_exit(&passive_buf_list->lock);
2156 return (passive_buf_list->num_entries);
2157 }
2158
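/*
 * desballoc() free callback, invoked when the stack frees an mblk
 * that wraps one of our rx buffers. ref_cnt encodes ownership: it is
 * 1 while the buffer sits in the driver's lists and is raised when
 * the buffer is loaned upstream. Dropping back to 1 here means the
 * buf area is still live, so the buffer is parked on the passive
 * list for reuse; dropping to 0 means the buffer belongs to an
 * rx_buf_area from a previous plumb that is being torn down, so its
 * mblk is freed and, once nothing else remains upstream, the buf
 * area itself is released.
 */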
2159 void
2160 qede_recycle_rx_buffer(char *arg)
2161 {
2162 /* LINTED E_BAD_PTR_CAST_ALIGN */
2163 qede_rx_buffer_t *rx_buffer = (qede_rx_buffer_t *)arg;
2164 qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2165 qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2166 qede_t *qede = rx_ring->qede;
2167 u32 buf_upstream = 0, ref_cnt;
2168 u32 num_entries;
2169
2170 if (rx_buffer->ref_cnt == 0) {
2171 return;
2172 }
2173
2174 /*
2175 * Since the data buffer associated with the mblk is free'ed
2176 * by upper layer, allocate it again to contain proper
2177 * free_func pointer
2178 */
2179 rx_buffer->mp = desballoc(rx_buffer->dma_info.virt_addr,
2180 rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2181
2182 ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2183 if (ref_cnt == 1) {
2184 /* Put the buffer into passive_buf_list to be reused */
2185 num_entries = qede_put_to_passive_list(rx_ring, rx_buffer);
2186 if (num_entries >= 32) {
2187 if (mutex_tryenter(&rx_ring->rx_lock) != 0) {
2188 qede_replenish_rx_buffers(rx_ring);
2189 qede_update_rx_q_producer(rx_ring);
2190 mutex_exit(&rx_ring->rx_lock);
2191 }
2192 }
2193 } else if (ref_cnt == 0) {
2194 /*
2195 * This is a buffer from a previous load instance of
2196 * rx_buf_area. Free the rx_buffer and if no more
2197 * buffers are upstream from this rx_buf_area instance
2198 * then free the rx_buf_area;
2199 */
2200 if (rx_buffer->mp != NULL) {
2201 freemsg(rx_buffer->mp);
2202 rx_buffer->mp = NULL;
2203 }
2204 mutex_enter(&qede->drv_lock);
2205
2206 buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2207 if (buf_upstream >= 1) {
2208 atomic_dec_32(&rx_buf_area->buf_upstream);
2209 }
2210 if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2211 qede_free_rx_buffers_legacy(qede, rx_buf_area);
2212 }
2213
2214 mutex_exit(&qede->drv_lock);
2215 } else {
2216 /* Sanity check */
2217 qede_info(rx_ring->qede, "rx_buffer %p"
2218 " ref_cnt %d is invalid",
2219 rx_buffer, ref_cnt);
2220 }
2221 }
2222
2223 void
2224 qede_recycle_copied_rx_buffer(qede_rx_buffer_t *rx_buffer)
2225 {
2226 qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2227 qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2228 qede_t *qede = rx_ring->qede;
2229 u32 buf_upstream = 0, ref_cnt;
2230
2231 if (rx_buffer->ref_cnt == 0) {
2232 /*
2233 * Can happen if the buffer is being free'd
2234 * in the stop routine
2235 */
2236 qede_info(qede, "!%s(%d): rx_buffer->ref_cnt = 0",
2237 __func__, qede->instance);
2238 return;
2239 }
2240
2241 buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2242 if (buf_upstream >= 1) {
2243 atomic_dec_32(&rx_buf_area->buf_upstream);
2244 }
2245
2246 /*
2247 * Since the data buffer associated with the mblk is free'ed
2248 * by upper layer, allocate it again to contain proper
2249 * free_func pointer
2250 * Though we could also be recycling a buffer that got copied,
2251 * so in that case the mp would still be intact.
2252 */
2253
2254 ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2255 if (ref_cnt == 1) {
2256 qede_put_to_passive_list(rx_ring, rx_buffer);
2257 /* Put the buffer into passive_buf_list to be reused */
2258 } else if (ref_cnt == 0) {
2259 /*
2260 * This is a buffer from a previous load instance of
2261 * rx_buf_area. Free the rx_buffer and if no more
2262 * buffers are upstream from this rx_buf_area instance
2263 * then free the rx_buf_area;
2264 */
2265 qede_info(rx_ring->qede, "Free up rx_buffer %p, index %d"
2266 " ref_cnt %d from a previous driver iteration",
2267 rx_buffer, rx_buffer->index, ref_cnt);
2268 if (rx_buffer->mp != NULL) {
2269 freemsg(rx_buffer->mp);
2270 rx_buffer->mp = NULL;
2271 }
2272
2273 if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2274 mutex_enter(&qede->drv_lock);
2275 qede_free_rx_buffers_legacy(qede, rx_buf_area);
2276 mutex_exit(&qede->drv_lock);
2277 }
2278 } else {
2279 /* Sanity check */
2280 qede_info(rx_ring->qede, "rx_buffer %p"
2281 " ref_cnt %d is invalid",
2282 rx_buffer, ref_cnt);
2283 }
2284 }
2285
2286
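/*
 * Allocate the rx data buffers. To reduce DMA bookkeeping, one DMA
 * page is allocated at a time and carved into bufs_per_page buffers
 * that share the same dma/acc handle, each at its own offset. As an
 * illustration (assuming a 4K page and a 2K DEFAULT_RX_BUF_SIZE)
 * this yields two buffers per page; buffers larger than a page get a
 * page-sized (or larger) allocation of their own.
 */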
2287 static int
2288 qede_alloc_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
2289 {
2290 int ret = DDI_SUCCESS, i, j;
2291 qede_rx_buffer_t *rx_buffer;
2292 qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2293 u32 bufs_per_page, buf_size;
2294 int page_size = (int)ddi_ptob(qede->dip, 1);
2295 qede_dma_info_t *dma_info;
2296 ddi_dma_cookie_t temp_cookie;
2297 int allocated = 0;
2298 u64 dma_addr;
2299 u8 *vaddr;
2300 ddi_dma_handle_t dma_handle;
2301 ddi_acc_handle_t acc_handle;
2302
2303 if (rx_ring->rx_buf_size > page_size) {
2304 bufs_per_page = 1;
2305 buf_size = rx_ring->rx_buf_size;
2306 } else {
2307 bufs_per_page =
2308 (page_size) / DEFAULT_RX_BUF_SIZE;
2309 buf_size = page_size;
2310 }
2311
2312 rx_buffer = &rx_buf_area->rx_buf_pool[0];
2313 rx_buf_area->bufs_per_page = bufs_per_page;
2314
2315 mutex_init(&rx_buf_area->active_buf_list.lock, NULL,
2316 MUTEX_DRIVER, 0);
2317 mutex_init(&rx_buf_area->passive_buf_list.lock, NULL,
2318 MUTEX_DRIVER, 0);
2319
2320 for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
2321 dma_info = &rx_buffer->dma_info;
2322
2323 ret = qede_dma_mem_alloc(qede,
2324 buf_size,
2325 DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2326 (caddr_t *)&dma_info->virt_addr,
2327 &temp_cookie,
2328 &dma_info->dma_handle,
2329 &dma_info->acc_handle,
2330 &qede_dma_attr_rxbuf,
2331 &qede_buf_acc_attr);
2332 if (ret != DDI_SUCCESS) {
2333 goto err;
2334 }
2335
2336 allocated++;
2337 vaddr = dma_info->virt_addr;
2338 dma_addr = temp_cookie.dmac_laddress;
2339 dma_handle = dma_info->dma_handle;
2340 acc_handle = dma_info->acc_handle;
2341
2342 for (j = 0; j < bufs_per_page; j++) {
2343 dma_info = &rx_buffer->dma_info;
2344 dma_info->virt_addr = vaddr;
2345 dma_info->phys_addr = dma_addr;
2346 dma_info->dma_handle = dma_handle;
2347 dma_info->acc_handle = acc_handle;
2348 dma_info->offset = j * rx_ring->rx_buf_size;
2349 /* Populate the recycle func and arg for the buffer */
2350 rx_buffer->recycle.free_func = qede_recycle_rx_buffer;
2351 rx_buffer->recycle.free_arg = (caddr_t)rx_buffer;
2352
2353 rx_buffer->mp = desballoc(dma_info->virt_addr,
2354 rx_ring->rx_buf_size, 0,
2355 &rx_buffer->recycle);
2356 if (rx_buffer->mp == NULL) {
2357 qede_warn(qede, "desballoc() failed, index %d",
2358 i);
2359 }
2360 rx_buffer->rx_ring = rx_ring;
2361 rx_buffer->rx_buf_area = rx_buf_area;
2362 rx_buffer->index = i + j;
2363 rx_buffer->ref_cnt = 1;
2364 rx_buffer++;
2365
2366 vaddr += rx_ring->rx_buf_size;
2367 dma_addr += rx_ring->rx_buf_size;
2368 }
2369 rx_ring->sw_rx_prod++;
2370 }
2371
2372 /*
2373 * Fill the rx_bd_ring with the allocated
2374 * buffers
2375 */
2376 ret = qede_init_bd(qede, rx_ring);
2377 if (ret != DDI_SUCCESS) {
2378 goto err;
2379 }
2380
2381 rx_buf_area->buf_upstream = 0;
2382
2383 return (ret);
2384 err:
2385 qede_free_rx_buffers(qede, rx_ring);
2386 return (ret);
2387 }
2388
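/*
 * Allocate the per-ring rx hardware structures: a buffer descriptor
 * chain (next-pointer mode, produced by the driver and consumed by
 * the firmware), a completion queue chain (PBL mode, consumed by the
 * driver) and the data buffers that back the BDs.
 */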
2389 static int
2390 qede_alloc_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2391 {
2392 qede_rx_ring_t *rx_ring;
2393 qede_rx_buf_area_t *rx_buf_area;
2394 size_t size;
2395
2396 ASSERT(qede != NULL);
2397 ASSERT(fp != NULL);
2398
2399 rx_ring = fp->rx_ring;
2400
2401 atomic_inc_32(&qede->detach_unsafe);
2402 /*
2403 * Allocate rx_buf_area for the plumb instance
2404 */
2405 rx_buf_area = kmem_zalloc(sizeof (*rx_buf_area), KM_SLEEP);
2406 if (rx_buf_area == NULL) {
2407 qede_info(qede, "!%s(%d): Cannot alloc rx_buf_area",
2408 __func__, qede->instance);
2409 return (DDI_FAILURE);
2410 }
2411
2412 rx_buf_area->inactive = 0;
2413 rx_buf_area->rx_ring = rx_ring;
2414 rx_ring->rx_buf_area = rx_buf_area;
2415 /* Rx Buffer descriptor queue */
2416 if (ecore_chain_alloc(&qede->edev,
2417 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2418 ECORE_CHAIN_MODE_NEXT_PTR,
2419 ECORE_CHAIN_CNT_TYPE_U16,
2420 qede->rx_ring_size,
2421 sizeof (struct eth_rx_bd),
2422 &rx_ring->rx_bd_ring,
2423 NULL) != ECORE_SUCCESS) {
2424 cmn_err(CE_WARN, "Failed to allocate "
2425 "ecore cqe chain");
2426 return (DDI_FAILURE);
2427 }
2428
2429 /* Rx Completion Descriptor queue */
2430 if (ecore_chain_alloc(&qede->edev,
2431 ECORE_CHAIN_USE_TO_CONSUME,
2432 ECORE_CHAIN_MODE_PBL,
2433 ECORE_CHAIN_CNT_TYPE_U16,
2434 qede->rx_ring_size,
2435 sizeof (union eth_rx_cqe),
2436 &rx_ring->rx_cqe_ring,
2437 NULL) != ECORE_SUCCESS) {
2438 cmn_err(CE_WARN, "Failed to allocate "
2439 "ecore bd chain");
2440 return (DDI_FAILURE);
2441 }
2442
2443 /* Rx Data buffers */
2444 if (qede_alloc_rx_buffers(qede, rx_ring) != DDI_SUCCESS) {
2445 qede_print_err("!%s(%d): Failed to alloc rx buffers",
2446 __func__, qede->instance);
2447 return (DDI_FAILURE);
2448 }
2449 return (DDI_SUCCESS);
2450 }
2451
2452 static void
2453 qede_free_tx_bd_ring(qede_t *qede, qede_fastpath_t *fp)
2454 {
2455 int i;
2456 qede_tx_ring_t *tx_ring;
2457
2458 ASSERT(qede != NULL);
2459 ASSERT(fp != NULL);
2460
2461 for (i = 0; i < qede->num_tc; i++) {
2462 tx_ring = fp->tx_ring[i];
2463
2464 if (tx_ring->tx_bd_ring.p_virt_addr) {
2465 ecore_chain_free(&qede->edev, &tx_ring->tx_bd_ring);
2466 tx_ring->tx_bd_ring.p_virt_addr = NULL;
2467 }
2468 tx_ring->hw_cons_ptr = NULL;
2469 tx_ring->sw_tx_cons = 0;
2470 tx_ring->sw_tx_prod = 0;
2471
2472 }
2473 }
2474
2475 static u32
2476 qede_alloc_tx_bd_ring(qede_t *qede, qede_tx_ring_t *tx_ring)
2477 {
2478 u32 ret = 0;
2479
2480 ret = ecore_chain_alloc(&qede->edev,
2481 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2482 ECORE_CHAIN_MODE_PBL,
2483 ECORE_CHAIN_CNT_TYPE_U16,
2484 tx_ring->bd_ring_size,
2485 sizeof (union eth_tx_bd_types),
2486 &tx_ring->tx_bd_ring,
2487 NULL);
2488 if (ret) {
2489 cmn_err(CE_WARN, "!%s(%d): Failed to alloc tx bd chain",
2490 __func__, qede->instance);
2491 goto error;
2492 }
2493
2494
2495 error:
2496 return (ret);
2497 }
2498
2499 static void
2500 qede_free_tx_bcopy_buffers(qede_tx_ring_t *tx_ring)
2501 {
2502 qede_tx_bcopy_pkt_t *bcopy_pkt;
2503 int i;
2504
2505 for (i = 0; i < tx_ring->tx_ring_size; i++) {
2506 bcopy_pkt = &tx_ring->bcopy_list.bcopy_pool[i];
2507 if (bcopy_pkt->dma_handle != NULL)
2508 (void) ddi_dma_unbind_handle(bcopy_pkt->dma_handle);
2509 if (bcopy_pkt->acc_handle != NULL) {
2510 ddi_dma_mem_free(&bcopy_pkt->acc_handle);
2511 bcopy_pkt->acc_handle = NULL;
2512 }
2513 if (bcopy_pkt->dma_handle != NULL) {
2514 ddi_dma_free_handle(&bcopy_pkt->dma_handle);
2515 bcopy_pkt->dma_handle = NULL;
2516 }
2517 /* bcopy_pkt points into the pool and is never NULL here */
2518 if (bcopy_pkt->mp) {
2519 freemsg(bcopy_pkt->mp);
2520 }
2521 
2522 }
2523
2524 if (tx_ring->bcopy_list.bcopy_pool != NULL) {
2525 kmem_free(tx_ring->bcopy_list.bcopy_pool,
2526 tx_ring->bcopy_list.size);
2527 tx_ring->bcopy_list.bcopy_pool = NULL;
2528 }
2529
2530 mutex_destroy(&tx_ring->bcopy_list.lock);
2531 }
2532
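/*
 * Pre-allocate one DMA-mapped copy buffer per tx descriptor. These
 * back the tx "copy" path, where packet data is bcopied into an
 * already-bound buffer instead of binding a DMA handle to the mblk
 * chain (presumably used for frames below the driver's tx copy
 * threshold).
 */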
2533 static u32
2534 qede_alloc_tx_bcopy_buffers(qede_t *qede, qede_tx_ring_t *tx_ring)
2535 {
2536 u32 ret = DDI_SUCCESS;
2537 int page_size = (int)ddi_ptob(qede->dip, 1);
2538 size_t size;
2539 qede_tx_bcopy_pkt_t *bcopy_pkt, *bcopy_list;
2540 int i;
2541 qede_dma_info_t dma_info;
2542 ddi_dma_cookie_t temp_cookie;
2543
2544 /*
2545 * If the tx buffer size is less than the page size
2546 * then try to use multiple copy buffers inside the
2547 * same page. Otherwise use the whole page (or more)
2548 * for the copy buffers
2549 */
2550 if (qede->tx_buf_size > page_size) {
2551 size = qede->tx_buf_size;
2552 } else {
2553 size = page_size;
2554 }
2555
2556 size = sizeof (qede_tx_bcopy_pkt_t) * qede->tx_ring_size;
2557 bcopy_list = kmem_zalloc(size, KM_SLEEP);
2558 if (bcopy_list == NULL) {
2559 qede_warn(qede, "!%s(%d): Failed to allocate bcopy_list",
2560 __func__, qede->instance);
2561 ret = DDI_FAILURE;
2562 goto exit;
2563 }
2564
2565 tx_ring->bcopy_list.size = size;
2566 tx_ring->bcopy_list.bcopy_pool = bcopy_list;
2567 bcopy_pkt = bcopy_list;
2568
2569 tx_ring->bcopy_list.head = 0;
2570 tx_ring->bcopy_list.tail = 0;
2571 mutex_init(&tx_ring->bcopy_list.lock, NULL, MUTEX_DRIVER, 0);
2572
2573 for (i = 0; i < qede->tx_ring_size; i++) {
2574
2575 ret = qede_dma_mem_alloc(qede,
2576 qede->tx_buf_size,
2577 DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2578 (caddr_t *)&dma_info.virt_addr,
2579 &temp_cookie,
2580 &dma_info.dma_handle,
2581 &dma_info.acc_handle,
2582 &qede_dma_attr_txbuf,
2583 &qede_buf_acc_attr);
2584 if (ret) {
2585 ret = DDI_FAILURE;
2586 goto exit;
2587 }
2588
2589
2590 bcopy_pkt->virt_addr = dma_info.virt_addr;
2591 bcopy_pkt->phys_addr = temp_cookie.dmac_laddress;
2592 bcopy_pkt->dma_handle = dma_info.dma_handle;
2593 bcopy_pkt->acc_handle = dma_info.acc_handle;
2594
2595 tx_ring->bcopy_list.free_list[i] = bcopy_pkt;
2596 bcopy_pkt++;
2597 }
2598
2599 exit:
2600 return (ret);
2601 }
2602
2603 static void
2604 qede_free_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2605 {
2606 qede_dma_handle_entry_t *dmah_entry;
2607 int i;
2608
2609 for (i = 0; i < tx_ring->tx_ring_size; i++) {
2610 dmah_entry = &tx_ring->dmah_list.dmah_pool[i];
2611 if (dmah_entry) {
2612 if (dmah_entry->dma_handle != NULL) {
2613 ddi_dma_free_handle(&dmah_entry->dma_handle);
2614 dmah_entry->dma_handle = NULL;
2615 } else {
2616 qede_info(qede, "dmah_entry %p, handle is NULL",
2617 dmah_entry);
2618 }
2619 }
2620 }
2621
2622 if (tx_ring->dmah_list.dmah_pool != NULL) {
2623 kmem_free(tx_ring->dmah_list.dmah_pool,
2624 tx_ring->dmah_list.size);
2625 tx_ring->dmah_list.dmah_pool = NULL;
2626 }
2627
2628 mutex_destroy(&tx_ring->dmah_list.lock);
2629 }
2630
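/*
 * Pre-allocate a pool of DMA handles (one per tx descriptor, using
 * qede_tx_buf_dma_attr) for the mapped tx path, where mblks are
 * bound directly instead of being copied.
 */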
2631 static u32
2632 qede_alloc_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2633 {
2634 int i;
2635 size_t size;
2636 u32 ret = DDI_SUCCESS;
2637 qede_dma_handle_entry_t *dmah_entry, *dmah_list;
2638
2639 size = sizeof (qede_dma_handle_entry_t) * qede->tx_ring_size;
2640 dmah_list = kmem_zalloc(size, KM_SLEEP);
2641 if (dmah_list == NULL) {
2642 qede_warn(qede, "!%s(%d): Failed to allocated dmah_list",
2643 __func__, qede->instance);
2644 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2645 ret = DDI_FAILURE;
2646 goto exit;
2647 }
2648
2649 tx_ring->dmah_list.size = size;
2650 tx_ring->dmah_list.dmah_pool = dmah_list;
2651 dmah_entry = dmah_list;
2652
2653 tx_ring->dmah_list.head = 0;
2654 tx_ring->dmah_list.tail = 0;
2655 mutex_init(&tx_ring->dmah_list.lock, NULL, MUTEX_DRIVER, 0);
2656
2657 /*
2658 * Allocate one dma handle per tx descriptor entry
2659 */
2660 for (i = 0; i < qede->tx_ring_size; i++) {
2661 ret = ddi_dma_alloc_handle(qede->dip,
2662 &qede_tx_buf_dma_attr,
2663 DDI_DMA_DONTWAIT,
2664 NULL,
2665 &dmah_entry->dma_handle);
2666 if (ret != DDI_SUCCESS) {
2667 qede_print_err("!%s(%d): dma alloc handle failed "
2668 "for index %d",
2669 __func__, qede->instance, i);
2670 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2671 ret = DDI_FAILURE;
2672 goto exit;
2673 }
2674
2675 tx_ring->dmah_list.free_list[i] = dmah_entry;
2676 dmah_entry++;
2677 }
2678 exit:
2679 return (ret);
2680 }
2681
2682 static u32
2683 qede_alloc_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2684 {
2685 int i;
2686 qede_tx_ring_t *tx_ring;
2687 u32 ret = DDI_SUCCESS;
2688 size_t size;
2689 qede_tx_recycle_list_t *recycle_list;
2690
2691 ASSERT(qede != NULL);
2692 ASSERT(fp != NULL);
2693
2694 for (i = 0; i < qede->num_tc; i++) {
2695 tx_ring = fp->tx_ring[i];
2696 tx_ring->bd_ring_size = qede->tx_ring_size;
2697
2698 /*
2699 * Allocate the buffer descriptor chain
2700 */
2701 ret = qede_alloc_tx_bd_ring(qede, tx_ring);
2702 if (ret) {
2703 cmn_err(CE_WARN, "!%s(%d): failed, %s",
2704 __func__, qede->instance, qede_get_ddi_fail(ret));
2705 return (ret);
2706 }
2707
2708 /*
2709 * Allocate copy mode buffers
2710 */
2711 ret = qede_alloc_tx_bcopy_buffers(qede, tx_ring);
2712 if (ret) {
2713 qede_print_err("!%s(%d): Failed to alloc tx copy "
2714 "buffers", __func__, qede->instance);
2715 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2716 ret = DDI_FAILURE;
2717 goto exit;
2718 }
2719
2720 /*
2721 * Allocate dma handles for mapped mode
2722 */
2723 ret = qede_alloc_tx_dma_handles(qede, tx_ring);
2724 if (ret) {
2725 qede_print_err("!%s(%d): Failed to alloc tx dma "
2726 "handles", __func__, qede->instance);
2727 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2728 ret = DDI_FAILURE;
2729 goto exit;
2730 }
2731
2732 /* Allocate tx_recycle list */
2733 size = sizeof (qede_tx_recycle_list_t) * qede->tx_ring_size;
2734 recycle_list = kmem_zalloc(size, KM_SLEEP);
2735 if (recycle_list == NULL) {
2736 qede_warn(qede, "!%s(%d): Failed to allocate"
2737 " tx_recycle_list", __func__, qede->instance);
2738 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2739 ret = DDI_FAILURE;
2740 goto exit;
2741 }
2742
2743 tx_ring->tx_recycle_list = recycle_list;
2744 }
2745 exit:
2746 return (ret);
2747 }
2748
2749 static void
2750 /* LINTED E_FUNC_ARG_UNUSED */
2751 qede_free_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2752 {
2753 qede_pci_free_consistent(&fp->sb_dma_handle, &fp->sb_acc_handle);
2754 fp->sb_virt = NULL;
2755 fp->sb_phys = 0;
2756 }
2757
2758 static int
2759 qede_alloc_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2760 {
2761 int status;
2762 int sb_id;
2763 struct ecore_dev *edev = &qede->edev;
2764 struct ecore_hwfn *p_hwfn;
2765 qede_vector_info_t *vect_info = fp->vect_info;
2766 ddi_dma_cookie_t sb_cookie;
2767
2768 ASSERT(qede != NULL);
2769 ASSERT(fp != NULL);
2770
2771 /*
2772 * In the case of multiple hardware engines,
2773 * interrupts are spread across all of them.
2774 * In the case of only one engine, all
2775 * interrupts are handled by that engine.
2776 * In the case of 2 engines, each has half
2777 * of the interrupts.
2778 */
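/*
 * For example, with two hwfns the sb_id % num_hwfns computation
 * below maps even-numbered vectors to engine 0 and odd-numbered
 * vectors to engine 1.
 */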
2779 sb_id = vect_info->vect_index;
2780 p_hwfn = &edev->hwfns[sb_id % qede->num_hwfns];
2781
2782 /* Allocate dma mem. for status_block */
2783 status = qede_dma_mem_alloc(qede,
2784 sizeof (struct status_block),
2785 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2786 (caddr_t *)&fp->sb_virt,
2787 &sb_cookie,
2788 &fp->sb_dma_handle,
2789 &fp->sb_acc_handle,
2790 &qede_desc_dma_attr,
2791 &qede_desc_acc_attr);
2792
2793 if (status != DDI_SUCCESS) {
2794 qede_info(qede, "Failed to allocate status_block dma mem");
2795 return (status);
2796 }
2797
2798 fp->sb_phys = sb_cookie.dmac_laddress;
2799
2800
2801 status = ecore_int_sb_init(p_hwfn,
2802 p_hwfn->p_main_ptt,
2803 fp->sb_info,
2804 (void *)fp->sb_virt,
2805 fp->sb_phys,
2806 fp->fp_index);
2807 if (status != ECORE_SUCCESS) {
2808 cmn_err(CE_WARN, "Failed ecore_int_sb_init");
2809 return (DDI_FAILURE);
2810 }
2811
2812 return (status);
2813 }
2814
2815 static void
2816 qede_free_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2817 {
2818 qede_tx_ring_t *tx_ring;
2819 int i;
2820
2821 for (i = 0; i < qede->num_tc; i++) {
2822 tx_ring = fp->tx_ring[i];
2823 qede_free_tx_dma_handles(qede, tx_ring);
2824 qede_free_tx_bcopy_buffers(tx_ring);
2825 qede_free_tx_bd_ring(qede, fp);
2826
2827 if (tx_ring->tx_recycle_list) {
2828 kmem_free(tx_ring->tx_recycle_list,
2829 sizeof (qede_tx_recycle_list_t)
2830 * qede->tx_ring_size);
2831 }
2832 }
2833 }
2834
2835 static void
2836 qede_fastpath_free_phys_mem(qede_t *qede)
2837 {
2838 int i;
2839 qede_fastpath_t *fp;
2840
2841 for (i = 0; i < qede->num_fp; i++) {
2842 fp = &qede->fp_array[i];
2843
2844 qede_free_rx_ring_phys(qede, fp);
2845 qede_free_tx_ring_phys(qede, fp);
2846 qede_free_sb_phys(qede, fp);
2847 }
2848 }
2849
2850 /*
2851 * Save dma_handles associated with the fastpath elements
2852 * allocated by ecore, for doing dma_sync in the fast path
2853 */
2854 static int
2855 qede_save_fp_dma_handles(qede_t *qede, qede_fastpath_t *fp)
2856 {
2857 int ret, i;
2858 qede_rx_ring_t *rx_ring;
2859 qede_tx_ring_t *tx_ring;
2860
2861 rx_ring = fp->rx_ring;
2862
2863 /* Rx bd ring dma_handle */
2864 ret = qede_osal_find_dma_handle_for_block(qede,
2865 (void *)rx_ring->rx_bd_ring.p_phys_addr,
2866 &rx_ring->rx_bd_dmah);
2867 if (ret != DDI_SUCCESS) {
2868 qede_print_err("!%s(%d): Cannot find dma_handle for "
2869 "rx_bd_ring, addr %p", __func__, qede->instance,
2870 rx_ring->rx_bd_ring.p_phys_addr);
2871 goto exit;
2872 }
2873
2874 /* rx cqe ring dma_handle */
2875 ret = qede_osal_find_dma_handle_for_block(qede,
2876 (void *)rx_ring->rx_cqe_ring.p_phys_addr,
2877 &rx_ring->rx_cqe_dmah);
2878 if (ret != DDI_SUCCESS) {
2879 qede_print_err("!%s(%d): Cannot find dma_handle for "
2880 "rx_cqe_ring, addr %p", __func__, qede->instance,
2881 rx_ring->rx_cqe_ring.p_phys_addr);
2882 goto exit;
2883 }
2884 /* rx cqe ring pbl */
2885 ret = qede_osal_find_dma_handle_for_block(qede,
2886 (void *)rx_ring->rx_cqe_ring.pbl_sp.p_phys_table,
2887 &rx_ring->rx_cqe_pbl_dmah);
2888 if (ret) {
2889 qede_print_err("!%s(%d): Cannot find dma_handle for "
2890 "rx_cqe pbl, addr %p", __func__, qede->instance,
2891 rx_ring->rx_cqe_ring.pbl_sp.p_phys_table);
2892 goto exit;
2893 }
2894
2895 /* tx_bd ring dma_handle(s) */
2896 for (i = 0; i < qede->num_tc; i++) {
2897 tx_ring = fp->tx_ring[i];
2898
2899 ret = qede_osal_find_dma_handle_for_block(qede,
2900 (void *)tx_ring->tx_bd_ring.p_phys_addr,
2901 &tx_ring->tx_bd_dmah);
2902 if (ret != DDI_SUCCESS) {
2903 qede_print_err("!%s(%d): Cannot find dma_handle "
2904 "for tx_bd_ring, addr %p", __func__,
2905 qede->instance,
2906 tx_ring->tx_bd_ring.p_phys_addr);
2907 goto exit;
2908 }
2909
2910 ret = qede_osal_find_dma_handle_for_block(qede,
2911 (void *)tx_ring->tx_bd_ring.pbl_sp.p_phys_table,
2912 &tx_ring->tx_pbl_dmah);
2913 if (ret) {
2914 qede_print_err("!%s(%d): Cannot find dma_handle for "
2915 "tx_bd pbl, addr %p", __func__, qede->instance,
2916 tx_ring->tx_bd_ring.pbl_sp.p_phys_table);
2917 goto exit;
2918 }
2919 }
2920
2921 exit:
2922 return (ret);
2923 }
2924
2925 int
2926 qede_fastpath_alloc_phys_mem(qede_t *qede)
2927 {
2928 int status = 0, i;
2929 qede_fastpath_t *fp;
2930
2931 for (i = 0; i < qede->num_fp; i++) {
2932 fp = &qede->fp_array[i];
2933
2934 status = qede_alloc_sb_phys(qede, fp);
2935 if (status != DDI_SUCCESS) {
2936 goto err;
2937 }
2938
2939 status = qede_alloc_rx_ring_phys(qede, fp);
2940 if (status != DDI_SUCCESS) {
2941 goto err;
2942 }
2943
2944 status = qede_alloc_tx_ring_phys(qede, fp);
2945 if (status != DDI_SUCCESS) {
2946 goto err;
2947 }
2948 status = qede_save_fp_dma_handles(qede, fp);
2949 if (status != DDI_SUCCESS) {
2950 goto err;
2951 }
2952 }
2953 return (status);
2954 err:
2955 qede_fastpath_free_phys_mem(qede);
2956 return (status);
2957 }
2958
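/*
 * Wire up each fastpath instance: point it at its status block, its
 * interrupt vector (the first num_hwfns vectors are skipped here as
 * they are used for the slowpath), its rx ring and one tx ring per
 * traffic class, and seed the per-ring sizes and thresholds from the
 * adapter-wide settings.
 */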
2959 static int
2960 qede_fastpath_config(qede_t *qede)
2961 {
2962 int i, j;
2963 qede_fastpath_t *fp;
2964 qede_rx_ring_t *rx_ring;
2965 qede_tx_ring_t *tx_ring;
2966 qede_vector_info_t *vect_info;
2967 int num_fp, num_hwfns;
2968
2969 ASSERT(qede != NULL);
2970
2971 num_fp = qede->num_fp;
2972 num_hwfns = qede->num_hwfns;
2973
2974 vect_info = &qede->intr_ctx.intr_vect_info[num_hwfns];
2975 fp = &qede->fp_array[0];
2976 tx_ring = &qede->tx_array[0][0];
2977
2978 for (i = 0; i < num_fp; i++, fp++, vect_info++) {
2979 fp->sb_info = &qede->sb_array[i];
2980 fp->qede = qede;
2981 fp->fp_index = i;
2982 /*
2983 * With a single hwfn, all fp's hwfn index should be zero
2984 * for all fp entries. If there are two engines, this
2985 * index should alternate between 0 and 1.
2986 */
2987 fp->fp_hw_eng_index = fp->fp_index % num_hwfns;
2988 fp->vport_id = 0;
2989 fp->stats_id = 0;
2990 fp->rss_id = fp->fp_index;
2991 fp->rx_queue_index = fp->fp_index;
2992 fp->vect_info = vect_info;
2993 /*
2994 * After vport update, interrupts will be
2995 * running, so we need to initialize our
2996 * enable/disable gate as such.
2997 */
2998 fp->disabled_by_poll = 0;
2999
3000 /* rx_ring setup */
3001 rx_ring = &qede->rx_array[i];
3002 fp->rx_ring = rx_ring;
3003 rx_ring->fp = fp;
3004 rx_ring->rx_buf_count = qede->rx_buf_count;
3005 rx_ring->rx_buf_size = qede->rx_buf_size;
3006 rx_ring->qede = qede;
3007 rx_ring->sw_rx_cons = 0;
3008 rx_ring->rx_copy_threshold = qede->rx_copy_threshold;
3009 rx_ring->rx_low_buffer_threshold =
3010 qede->rx_low_buffer_threshold;
3011 rx_ring->queue_started = B_FALSE;
3012
3013 /* tx_ring setup */
3014 for (j = 0; j < qede->num_tc; j++) {
3015 tx_ring = &qede->tx_array[j][i];
3016 fp->tx_ring[j] = tx_ring;
3017 tx_ring->qede = qede;
3018 tx_ring->fp = fp;
3019 tx_ring->fp_idx = i;
3020 tx_ring->tx_queue_index = i * qede->num_fp +
3021 fp->fp_index;
3022 tx_ring->tx_buf_size = qede->tx_buf_size;
3023 tx_ring->tx_ring_size = qede->tx_ring_size;
3024 tx_ring->queue_started = B_FALSE;
3025 #ifdef DBLK_DMA_PREMAP
3026 tx_ring->pm_handle = qede->pm_handle;
3027 #endif
3028
3029 tx_ring->doorbell_addr =
3030 qede->doorbell;
3031 tx_ring->doorbell_handle =
3032 qede->doorbell_handle;
3033 }
3034 }
3035
3036 return (DDI_SUCCESS);
3037 }
3038
3039 /*
3040 * op = 1, Initialize link
3041 * op = 0, Destroy link
3042 */
3043 int
3044 qede_configure_link(qede_t *qede, bool op)
3045 {
3046 struct ecore_dev *edev = &qede->edev;
3047 struct ecore_hwfn *hwfn;
3048 struct ecore_ptt *ptt = NULL;
3049 int i, ret = DDI_SUCCESS;
3050
3051 for_each_hwfn(edev, i) {
3052 hwfn = &edev->hwfns[i];
3053 qede_info(qede, "Configuring link for hwfn#%d", i);
3054
3055 ptt = ecore_ptt_acquire(hwfn);
3056 if (ptt == NULL) {
3057 qede_info(qede, "Cannot reserver ptt from ecore");
3058 ret = DDI_FAILURE;
3059 goto exit;
3060 }
3061
3062 ret = ecore_mcp_set_link(hwfn, ptt, op);
3063
3064 ecore_ptt_release(hwfn, ptt);
3065 if (ret) {
3066 /* if link config fails, make sure ptt is released */
3067 goto exit;
3068 }
3069 }
3070 exit:
3071 return (ret);
3072 }
3073
3074 /*
3075 * drv_lock must be held by the caller.
3076 */
3077 int
3078 qede_stop(qede_t *qede)
3079 {
3080 int status;
3081
3082 ASSERT(mutex_owned(&qede->drv_lock));
3083 qede->qede_state = QEDE_STATE_STOPPING;
3084
3085 mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3086
3087 qede_disable_all_fastpath_intrs(qede);
3088 status = qede_configure_link(qede, 0 /* Re-Set */);
3089 if (status) {
3090 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3091 cmn_err(CE_NOTE, "!%s(%d): Failed to reset link",
3092 __func__, qede->instance);
3093 return (status);
3094 }
3095 qede_clear_filters(qede);
3096 status = qede_fastpath_stop_queues(qede);
3097 if (status != DDI_SUCCESS) {
3098 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3099 cmn_err(CE_WARN, "qede_stop:"
3100 " qede_fastpath_stop_queues FAILED "
3101 " qede=%p\n",
3102 qede);
3103 return (status);
3104 }
3105
3106 qede_fastpath_free_phys_mem(qede);
3107
3108 qede->qede_state = QEDE_STATE_STOPPED;
3109 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3110 cmn_err(CE_WARN, "qede_stop SUCCESS =%p\n", qede);
3111 return (DDI_SUCCESS);
3112 }
3113
3114 /*
3115 * drv_lock must be held by the caller.
3116 */
3117 int
3118 qede_start(qede_t *qede)
3119 {
3120 int status;
3121
3122 ASSERT(mutex_owned(&qede->drv_lock));
3123
3124 qede->qede_state = QEDE_STATE_STARTING;
3125
3126 mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3127
3128 /*
3129 * Configure the fastpath blocks with
3130 * the sb_info, rx_ring and tx_rings
3131 */
3132 if (qede_fastpath_config(qede) != DDI_SUCCESS) {
3133 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3134 qede_print_err("!%s(%d): qede_fastpath_config failed",
3135 __func__, qede->instance);
3136 return (DDI_FAILURE);
3137 }
3138
3139
3140 /*
3141 * Allocate the physical memory
3142 * for fastpath.
3143 */
3144 status = qede_fastpath_alloc_phys_mem(qede);
3145 if (status) {
3146 cmn_err(CE_NOTE, "fastpath_alloc_phys_mem "
3147 " failed qede=%p\n", qede);
3148 return (DDI_FAILURE);
3149 }
3150
3151 status = qede_fastpath_start_queues(qede);
3152 if (status) {
3153 cmn_err(CE_NOTE, "fp_start_queues "
3154 " failed qede=%p\n", qede);
3155 goto err_out1;
3156 }
3157
3158 cmn_err(CE_NOTE, "qede_start fp_start_queues qede=%p\n", qede);
3159
3160 status = qede_configure_link(qede, 1 /* Set */);
3161 if (status) {
3162 cmn_err(CE_NOTE, "!%s(%d): Failed to configure link",
3163 __func__, qede->instance);
3164 goto err_out1;
3165 }
3166
3167 /*
3168 * Put interface in regular mode
3169 */
3170 if (qede_set_filter_rx_mode(qede,
3171 QEDE_FILTER_RX_MODE_REGULAR) != DDI_SUCCESS) {
3172 cmn_err(CE_NOTE, "!%s(%d): Failed to set filter mode",
3173 __func__, qede->instance);
3174 goto err_out1;
3175 }
3176
3177 status = qede_enable_all_fastpath_intrs(qede);
3178 if (status) {
3179 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3180 cmn_err(CE_NOTE, "!%s(%d): Failed to enable intrs",
3181 __func__, qede->instance);
3182 goto err_out2;
3183 }
3184 qede->qede_state = QEDE_STATE_STARTED;
3185 cmn_err(CE_NOTE, "!%s(%d): SUCCESS",
3186 __func__, qede->instance);
3187
3188 return (status);
3189
3190 err_out2:
3191 (void) qede_fastpath_stop_queues(qede);
3192 err_out1:
3193 qede_fastpath_free_phys_mem(qede);
3194 return (DDI_FAILURE);
3195 }
3196
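/*
 * Tear down whatever attach managed to set up, in roughly the
 * reverse order of acquisition. Each step is gated on a bit in
 * qede->attach_resources, so this routine can be called from any
 * point in the attach failure path as well as from detach.
 */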
3197 static void
3198 qede_free_attach_resources(qede_t *qede)
3199 {
3200 struct ecore_dev *edev;
3201 int status;
3202
3203 edev = &qede->edev;
3204
3205 if (qede->attach_resources & QEDE_ECORE_HW_INIT) {
3206 if (ecore_hw_stop(edev) != ECORE_SUCCESS) {
3207 cmn_err(CE_NOTE, "%s(%d): ecore_hw_stop: failed\n",
3208 __func__, qede->instance);
3209 }
3210 qede->attach_resources &= ~QEDE_ECORE_HW_INIT;
3211 }
3212
3213 if (qede->attach_resources & QEDE_SP_INTR_ENBL) {
3214 status = qede_disable_slowpath_intrs(qede);
3215 if (status != DDI_SUCCESS) {
3216 qede_print("%s(%d): qede_disable_slowpath_intrs Failed",
3217 __func__, qede->instance);
3218 }
3219 qede->attach_resources &= ~QEDE_SP_INTR_ENBL;
3220 }
3221 if (qede->attach_resources & QEDE_KSTAT_INIT) {
3222 qede_kstat_fini(qede);
3223 qede->attach_resources &= ~QEDE_KSTAT_INIT;
3224 }
3225
3226
3227 if (qede->attach_resources & QEDE_GLD_INIT) {
3228 status = mac_unregister(qede->mac_handle);
3229 if (status != 0) {
3230 qede_print("%s(%d): mac_unregister Failed",
3231 __func__, qede->instance);
3232 }
3233 qede->attach_resources &= ~QEDE_GLD_INIT;
3234 }
3235
3236 if (qede->attach_resources & QEDE_EDEV_CONFIG) {
3237 ecore_resc_free(edev);
3238 qede->attach_resources &= ~QEDE_EDEV_CONFIG;
3239 }
3240
3241 if (qede->attach_resources & QEDE_INTR_CONFIG) {
3242 qede_unconfig_intrs(qede);
3243 qede->attach_resources &= ~QEDE_INTR_CONFIG;
3244 }
3245
3246 if (qede->attach_resources & QEDE_INTR_ALLOC) {
3247 qede_free_intrs(qede);
3248 qede->attach_resources &= ~QEDE_INTR_ALLOC;
3249 }
3250
3251 if (qede->attach_resources & QEDE_INIT_LOCKS) {
3252 qede_destroy_locks(qede);
3253 qede->attach_resources &= ~QEDE_INIT_LOCKS;
3254 }
3255
3256 if (qede->attach_resources & QEDE_IO_STRUCT_ALLOC) {
3257 qede_free_io_structs(qede);
3258 qede->attach_resources &= ~QEDE_IO_STRUCT_ALLOC;
3259 }
3260 #ifdef QEDE_LSR
3261 if (qede->attach_resources & QEDE_CALLBACK) {
3262
3263
3264 status = ddi_cb_unregister(qede->callback_hdl);
3265 if (status != DDI_SUCCESS) {
3266 }
3267 qede->attach_resources &= ~QEDE_CALLBACK;
3268 }
3269 #endif
3270 if (qede->attach_resources & QEDE_ECORE_HW_PREP) {
3271 ecore_hw_remove(edev);
3272 qede->attach_resources &= ~QEDE_ECORE_HW_PREP;
3273 }
3274
3275 if (qede->attach_resources & QEDE_PCI) {
3276 qede_unconfig_pci(qede);
3277 qede->attach_resources &= ~QEDE_PCI;
3278 }
3279
3280 if (qede->attach_resources & QEDE_FM) {
3281 qede_unconfig_fm(qede);
3282 qede->attach_resources &= ~QEDE_FM;
3283 }
3284
3285 /*
3286 * Check for possible mem. left behind by ecore
3287 */
3288 (void) qede_osal_cleanup(qede);
3289
3290 if (qede->attach_resources & QEDE_STRUCT_ALLOC) {
3291 ddi_set_driver_private(qede->dip, NULL);
3292 qede->attach_resources &= ~QEDE_STRUCT_ALLOC;
3293 kmem_free(qede, sizeof (qede_t));
3294 }
3295 }
3296
3297 /*
3298 * drv_lock must be held by the caller.
3299 */
3300 static int
3301 qede_suspend(qede_t *qede)
3302 {
3303 // STUB
3304 ASSERT(mutex_owned(&qede->drv_lock));
3305 printf("in qede_suspend\n");
3306 return (DDI_FAILURE);
3307 }
3308
3309 static int
3310 qede_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3311 {
3312 qede_t *qede;
3313 struct ecore_dev *edev;
3314 int instance;
3315 uint32_t vendor_id;
3316 uint32_t device_id;
3317 struct ecore_hwfn *p_hwfn;
3318 struct ecore_ptt *p_ptt;
3319 struct ecore_mcp_link_params *link_params;
3320 struct ecore_hw_init_params hw_init_params;
3321 struct ecore_drv_load_params load_params;
3322 int *props;
3323 uint32_t num_props;
3324 int rc = 0;
3325
3326 switch (cmd) {
3327 default:
3328 return (DDI_FAILURE);
3329
3330 case DDI_RESUME:
3331 {
3332 qede = (qede_t * )ddi_get_driver_private(dip);
3333 if (qede == NULL || qede->dip != dip) {
3334 cmn_err(CE_NOTE, "qede:%s: Could not allocate"
3335 " adapter structure\n", __func__);
3336 return (DDI_FAILURE);
3337 }
3338
3339 mutex_enter(&qede->drv_lock);
3340 if (qede->qede_state != QEDE_STATE_SUSPENDED) {
3341 mutex_exit(&qede->drv_lock);
3342 return (DDI_FAILURE);
3343 }
3344
3345 if (qede_resume(qede) != DDI_SUCCESS) {
3346 cmn_err(CE_NOTE, "%s:%d resume operation failure\n",
3347 __func__, qede->instance);
3348 mutex_exit(&qede->drv_lock);
3349 return (DDI_FAILURE);
3350 }
3351
3352 qede->qede_state = QEDE_STATE_ATTACHED;
3353 mutex_exit(&qede->drv_lock);
3354 return (DDI_SUCCESS);
3355 }
3356 case DDI_ATTACH:
3357 {
3358 instance = ddi_get_instance(dip);
3359 cmn_err(CE_NOTE, "qede_attach(%d): Enter",
3360 instance);
3361
3362 /* Allocate the per-instance adapter structure */
3363 if ((qede = kmem_zalloc(sizeof (qede_t), KM_SLEEP)) == NULL) {
3364 cmn_err(CE_NOTE, "!%s(%d): Could not allocate adapter "
3365 "structure\n", __func__, instance);
3366 return (DDI_FAILURE);
3367 }
3368
3369 qede->attach_resources |= QEDE_STRUCT_ALLOC;
3370 ddi_set_driver_private(dip, qede);
3371 qede->dip = dip;
3372 qede->instance = instance;
3373 snprintf(qede->name, sizeof (qede->name), "qede%d", instance);
3374 edev = &qede->edev;
3375
3376 if (qede_config_fm(qede) != DDI_SUCCESS) {
3377 goto exit_with_err;
3378 }
3379 qede->attach_resources |= QEDE_FM;
3380
3381 /*
3382 * Do PCI config setup and map the register
3383 * and doorbell space */
3384 if (qede_config_pci(qede) != DDI_SUCCESS) {
3385 goto exit_with_err;
3386 }
3387 qede->attach_resources |= QEDE_PCI;
3388
3389 /*
3390 * Setup OSAL mem alloc related locks.
3391 * Do not call any ecore functions without
3392 * initializing these locks
3393 */
3394 mutex_init(&qede->mem_list.mem_list_lock, NULL,
3395 MUTEX_DRIVER, 0);
3396 mutex_init(&qede->phys_mem_list.lock, NULL,
3397 MUTEX_DRIVER, 0);
3398 QEDE_INIT_LIST_HEAD(&qede->mem_list.mem_list_head);
3399 QEDE_INIT_LIST_HEAD(&qede->phys_mem_list.head);
3400 QEDE_INIT_LIST_HEAD(&qede->mclist.head);
3401
3402
3403 /*
3404 * FIXME: this function calls ecore api, but
3405 * dp_level and module are not yet set
3406 */
3407 if (qede_prepare_edev(qede) != ECORE_SUCCESS) {
3408 // report fma
3409 goto exit_with_err;
3410 }
3411
3412 qede->num_hwfns = edev->num_hwfns;
3413 qede->num_tc = 1;
3414 memcpy(qede->ether_addr, edev->hwfns->hw_info.hw_mac_addr,
3415 ETHERADDRL);
3416 qede_info(qede, "Interface mac_addr : " MAC_STRING,
3417 MACTOSTR(qede->ether_addr));
3418 qede->attach_resources |= QEDE_ECORE_HW_PREP;
3419
3420 if (qede_set_operating_params(qede) != DDI_SUCCESS) {
3421 goto exit_with_err;
3422 }
3423 qede->attach_resources |= QEDE_SET_PARAMS;
3424 #ifdef QEDE_LSR
3425 if (ddi_cb_register(qede->dip,
3426 qede->callback_flags,
3427 qede_callback,
3428 qede,
3429 NULL,
3430 &qede->callback_hdl)) {
3431 goto exit_with_err;
3432 }
3433 qede->attach_resources |= QEDE_CALLBACK;
3434 #endif
3435 qede_cfg_reset(qede);
3436
3437 if (qede_alloc_intrs(qede)) {
3438 cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3439 __func__);
3440 goto exit_with_err;
3441 }
3442
3443 qede->attach_resources |= QEDE_INTR_ALLOC;
3444
3445 if (qede_config_intrs(qede)) {
3446 cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3447 __func__);
3448 goto exit_with_err;
3449 }
3450 qede->attach_resources |= QEDE_INTR_CONFIG;
3451
3452 if (qede_alloc_io_structs(qede) != DDI_SUCCESS) {
3453 cmn_err(CE_NOTE, "%s: Could not allocate data"
3454 " path structures\n", __func__);
3455 goto exit_with_err;
3456 }
3457
3458 qede->attach_resources |= QEDE_IO_STRUCT_ALLOC;
3459
3460 /* Lock init cannot fail */
3461 qede_init_locks(qede);
3462 qede->attach_resources |= QEDE_INIT_LOCKS;
3463
3464
3465 if (qede_config_edev(qede)) {
3466 cmn_err(CE_NOTE, "%s: Could not configure ecore \n",
3467 __func__);
3468 goto exit_with_err;
3469 }
3470 qede->attach_resources |= QEDE_EDEV_CONFIG;
3471
3472 if (qede_kstat_init(qede) == B_FALSE) {
3473 cmn_err(CE_NOTE, "%s: Could not initialize kstat \n",
3474 __func__);
3475 goto exit_with_err;
3476
3477 }
3478 qede->attach_resources |= QEDE_KSTAT_INIT;
3479
3480 if (qede_gld_init(qede) == B_FALSE) {
3481 cmn_err(CE_NOTE, "%s: Failed call to qede_gld_init",
3482 __func__);
3483 goto exit_with_err;
3484 }
3485
3486 qede->attach_resources |= QEDE_GLD_INIT;
3487
3488 if (qede_enable_slowpath_intrs(qede)) {
3489 cmn_err(CE_NOTE, "%s: Could not enable interrupts\n",
3490 __func__);
3491 goto exit_with_err;
3492 }
3493
3494 qede->attach_resources |= QEDE_SP_INTR_ENBL;
3495
3496 cmn_err(CE_NOTE, "qede->attach_resources = %x\n",
3497 qede->attach_resources);
3498
3499 memset((void *)&hw_init_params, 0,
3500 sizeof (struct ecore_hw_init_params));
3501 hw_init_params.p_drv_load_params = &load_params;
3502
3503 hw_init_params.p_tunn = NULL;
3504 hw_init_params.b_hw_start = true;
3505 hw_init_params.int_mode = qede->intr_ctx.intr_mode;
3506 hw_init_params.allow_npar_tx_switch = false;
3507 hw_init_params.bin_fw_data = NULL;
3508 load_params.is_crash_kernel = false;
3509 load_params.mfw_timeout_val = 0;
3510 load_params.avoid_eng_reset = false;
3511 load_params.override_force_load =
3512 ECORE_OVERRIDE_FORCE_LOAD_NONE;
3513
3514 if (ecore_hw_init(edev, &hw_init_params) != ECORE_SUCCESS) {
3515 cmn_err(CE_NOTE,
3516 "%s: Could not initialze ecore block\n",
3517 __func__);
3518 goto exit_with_err;
3519 }
3520 qede->attach_resources |= QEDE_ECORE_HW_INIT;
3521 qede->qede_state = QEDE_STATE_ATTACHED;
3522
3523 qede->detach_unsafe = 0;
3524
3525 snprintf(qede->version,
3526 sizeof (qede->version),
3527 "%d.%d.%d",
3528 MAJVERSION,
3529 MINVERSION,
3530 REVVERSION);
3531
3532 snprintf(qede->versionFW,
3533 sizeof (qede->versionFW),
3534 "%d.%d.%d.%d",
3535 FW_MAJOR_VERSION,
3536 FW_MINOR_VERSION,
3537 FW_REVISION_VERSION,
3538 FW_ENGINEERING_VERSION);
3539
3540 p_hwfn = &qede->edev.hwfns[0];
3541 p_ptt = ecore_ptt_acquire(p_hwfn);
3542 /*
3543 * (test) : saving the default link_input params
3544 */
3545 link_params = ecore_mcp_get_link_params(p_hwfn);
3546 memset(&qede->link_input_params, 0,
3547 sizeof (qede_link_input_params_t));
3548 memcpy(&qede->link_input_params.default_link_params,
3549 link_params,
3550 sizeof (struct ecore_mcp_link_params));
3551
3552 p_hwfn = ECORE_LEADING_HWFN(edev);
3553 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &qede->mfw_ver, NULL);
3554
3555 ecore_ptt_release(p_hwfn, p_ptt);
3556
3557 snprintf(qede->versionMFW,
3558 sizeof (qede->versionMFW),
3559 "%d.%d.%d.%d",
3560 (qede->mfw_ver >> 24) & 0xFF,
3561 (qede->mfw_ver >> 16) & 0xFF,
3562 (qede->mfw_ver >> 8) & 0xFF,
3563 qede->mfw_ver & 0xFF);
3564
3565 snprintf(qede->chip_name,
3566 sizeof (qede->chip_name),
3567 "%s",
3568 ECORE_IS_BB(edev) ? "BB" : "AH");
3569
3570 snprintf(qede->chipID,
3571 sizeof (qede->chipID),
3572 "0x%x",
3573 qede->edev.chip_num);
3574
3575 *qede->bus_dev_func = 0;
3576 vendor_id = 0;
3577 device_id = 0;
3578
3579
3580 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3581 0, "reg", &props, &num_props);
3582 if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3583
3584 snprintf(qede->bus_dev_func,
3585 sizeof (qede->bus_dev_func),
3586 "%04x:%02x:%02x",
3587 PCI_REG_BUS_G(props[0]),
3588 PCI_REG_DEV_G(props[0]),
3589 PCI_REG_FUNC_G(props[0]));
3590
3591 /*
3592 * This information is used
3593 * in the QEDE_FUNC_INFO ioctl
3594 */
3595 qede->pci_func = (uint8_t) PCI_REG_FUNC_G(props[0]);
3596
3597 ddi_prop_free(props);
3598
3599 }
3600
3601 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3602 0, "vendor-id", &props, &num_props);
3603 if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3604 vendor_id = props[0];
3605 ddi_prop_free(props);
3606 }
3607 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3608 0, "device-id", &props, &num_props);
3609 if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3610 device_id = props[0];
3611 ddi_prop_free(props);
3612 }
3613
3614
3615 snprintf(qede->vendor_device,
3616 sizeof (qede->vendor_device),
3617 "%04x:%04x",
3618 vendor_id,
3619 device_id);
3620
3621
3622 snprintf(qede->intrAlloc,
3623 sizeof (qede->intrAlloc), "%d %s",
3624 (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_FIXED)
3625 ? 1 :
3626 qede->intr_ctx.intr_vect_allocated,
3627 (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSIX)
3628 ? "MSIX" :
3629 (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSI)
3630 ? "MSI" : "Fixed");
3631
3632 qede_print("%s(%d): success, addr %p chip %s id %s intr %s\n",
3633 __func__, qede->instance, qede, qede->chip_name,
3634 qede->vendor_device,qede->intrAlloc);
3635
3636 qede_print("%s(%d): version %s FW %s MFW %s\n",
3637 __func__, qede->instance, qede->version,
3638 qede->versionFW, qede->versionMFW);
3639
3640 return (DDI_SUCCESS);
3641 }
3642 }
3643 exit_with_err:
3644 cmn_err(CE_WARN, "%s:%d failed %x\n", __func__, qede->instance,
3645 qede->attach_resources);
3646 (void)qede_free_attach_resources(qede);
3647 return (DDI_FAILURE);
3648 }
3649
3650 static int
3651 qede_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3652 {
3653
3654 qede_t *qede;
3655 int status;
3656 uint32_t count = 0;
3657
3658 qede = (qede_t *)ddi_get_driver_private(dip);
3659 if ((qede == NULL) || (qede->dip != dip)) {
3660 return (DDI_FAILURE);
3661 }
3662
3663 switch (cmd) {
3664 default:
3665 return (DDI_FAILURE);
3666 case DDI_SUSPEND:
3667 mutex_enter(&qede->drv_lock);
3668 status = qede_suspend(qede);
3669 if (status != DDI_SUCCESS) {
3670 mutex_exit(&qede->drv_lock);
3671 return (DDI_FAILURE);
3672 }
3673
3674 qede->qede_state = QEDE_STATE_SUSPENDED;
3675 mutex_exit(&qede->drv_lock);
3676 return (DDI_SUCCESS);
3677
3678 case DDI_DETACH:
3679 mutex_enter(&qede->drv_lock);
3680 if (qede->qede_state == QEDE_STATE_STARTED) {
3681 qede->plumbed = 0;
3682 status = qede_stop(qede);
3683 if (status != DDI_SUCCESS) {
3684 qede->qede_state = QEDE_STATE_FAILED;
3685 mutex_exit(&qede->drv_lock);
3686 return (DDI_FAILURE);
3687 }
3688 }
3689 mutex_exit(&qede->drv_lock);
3690 if (qede->detach_unsafe) {
3691 /*
3692 * wait for rx buffers to be returned from
3693 * upper layers
3694 */
3695 count = 0;
3696 while ((qede->detach_unsafe) && (count < 100)) {
3697 qede_delay(100);
3698 count++;
3699 }
3700 if (qede->detach_unsafe) {
3701 qede_info(qede, "!%s(%d) : Buffers still with"
3702 " OS, failing detach\n",
3703 qede->name, qede->instance);
3704 return (DDI_FAILURE);
3705 }
3706 }
3707 qede_free_attach_resources(qede);
3708 return (DDI_SUCCESS);
3709 }
3710 }
3711
3712 static int
3713 /* LINTED E_FUNC_ARG_UNUSED */
3714 qede_quiesce(dev_info_t *dip)
3715 {
3716 qede_t *qede = (qede_t *)ddi_get_driver_private(dip);
3717 struct ecore_dev *edev = &qede->edev;
3718 int status = DDI_SUCCESS;
3719 struct ecore_hwfn *p_hwfn;
3720 struct ecore_ptt *p_ptt = NULL;
3721
3722 mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3723 p_hwfn = ECORE_LEADING_HWFN(edev);
3724 p_ptt = ecore_ptt_acquire(p_hwfn);
3725 if (p_ptt) {
3726 status = ecore_start_recovery_process(p_hwfn, p_ptt);
3727 ecore_ptt_release(p_hwfn, p_ptt);
3728 OSAL_MSLEEP(5000);
3729 }
3730 return (status);
3731
3732 }
3733
3734
3735 DDI_DEFINE_STREAM_OPS(qede_dev_ops, nulldev, nulldev, qede_attach, qede_detach,
3736 nodev, NULL, D_MP, NULL, qede_quiesce);
3737
3738 static struct modldrv qede_modldrv =
3739 {
3740 &mod_driverops, /* drv_modops (must be mod_driverops for drivers) */
3741 QEDE_PRODUCT_INFO, /* drv_linkinfo (string displayed by modinfo) */
3742 &qede_dev_ops /* drv_dev_ops */
3743 };
3744
3745
3746 static struct modlinkage qede_modlinkage =
3747 {
3748 MODREV_1, /* ml_rev */
3749 (&qede_modldrv), /* ml_linkage */
3750 NULL /* NULL termination */
3751 };
3752
3753 int
3754 _init(void)
3755 {
3756 int rc;
3757
3758 qede_dev_ops.devo_cb_ops->cb_str = NULL;
3759 mac_init_ops(&qede_dev_ops, "qede");
3760
3761 /* Install module information with O/S */
3762 if ((rc = mod_install(&qede_modlinkage)) != DDI_SUCCESS) {
3763 mac_fini_ops(&qede_dev_ops);
3764 cmn_err(CE_NOTE, "mod_install failed");
3765 return (rc);
3766 }
3767
3768 return (rc);
3769 }
3770
3771
3772 int
3773 _fini(void)
3774 {
3775 int rc;
3776
3777 if ((rc = mod_remove(&qede_modlinkage)) == DDI_SUCCESS) {
3778 mac_fini_ops(&qede_dev_ops);
3779 }
3780
3781 return (rc);
3782 }
3783
3784
3785 int
3786 _info(struct modinfo *modinfop)
3787 {
3788 return (mod_info(&qede_modlinkage, modinfop));
3789 }
3790