/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2025 Oxide Computer Company
 */

#include "qede.h"

#define	FP_LOCK(ptr)	\
	mutex_enter(&ptr->fp_lock);
#define	FP_UNLOCK(ptr)	\
	mutex_exit(&ptr->fp_lock);

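/*
 * Scan the driver's unicast filter table for mac_addr and return its slot
 * index, or -1 if the address is not currently programmed.
 */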
int
qede_ucst_find(qede_t *qede, const uint8_t *mac_addr)
{
	int slot;

	for (slot = 0; slot < qede->ucst_total; slot++) {
		if (bcmp(qede->ucst_mac[slot].mac_addr.ether_addr_octet,
		    mac_addr, ETHERADDRL) == 0) {
			return (slot);
		}
	}
	return (-1);
}

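/*
 * Issue a unicast MAC filter command (add or remove, selected by fl) to
 * the firmware, applied to both the rx and tx directions.
 */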
static int
qede_set_mac_addr(qede_t *qede, uint8_t *mac_addr, uint8_t fl)
{
	struct ecore_filter_ucast params;

	memset(&params, 0, sizeof (params));

	params.opcode = fl;
	params.type = ECORE_FILTER_MAC;
	params.is_rx_filter = true;
	params.is_tx_filter = true;
	COPY_ETH_ADDRESS(mac_addr, params.mac);

	return (ecore_filter_ucast_cmd(&qede->edev,
	    &params, ECORE_SPQ_MODE_EBLOCK, NULL));
}
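
/*
 * Add mac_addr to the first free slot in the unicast filter table and
 * program it into hardware.  Once the first address goes in (ucst_avail
 * drops to ucst_total - 1) the broadcast address is also programmed into
 * a filter slot.  Returns 0 on success or ENOSPC when the table is full.
 */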
static int
qede_add_macaddr(qede_t *qede, uint8_t *mac_addr)
{
	int i, ret = 0;

	i = qede_ucst_find(qede, mac_addr);
	if (i != -1) {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "mac addr already added %d\n",
		    qede->ucst_avail);
		return (0);
	}
	if (qede->ucst_avail == 0) {
		qede_info(qede, "add macaddr ignored\n");
		return (ENOSPC);
	}
	for (i = 0; i < qede->ucst_total; i++) {
		if (qede->ucst_mac[i].set == 0) {
			break;
		}
	}
	if (i >= qede->ucst_total) {
		qede_info(qede, "add macaddr ignored no space");
		return (ENOSPC);
	}
	ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_ADD);
	if (ret == 0) {
		bcopy(mac_addr,
		    qede->ucst_mac[i].mac_addr.ether_addr_octet,
		    ETHERADDRL);
		qede->ucst_mac[i].set = 1;
		qede->ucst_avail--;
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, " add macaddr passed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
	} else {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "add macaddr failed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
	}
	if (qede->ucst_avail == (qede->ucst_total - 1)) {
		u8 bcast_addr[] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};
		for (i = 0; i < qede->ucst_total; i++) {
			if (qede->ucst_mac[i].set == 0)
				break;
		}
		ret = qede_set_mac_addr(qede,
		    (uint8_t *)bcast_addr, ECORE_FILTER_ADD);
		if (ret == 0) {
			bcopy(bcast_addr,
			    qede->ucst_mac[i].mac_addr.ether_addr_octet,
			    ETHERADDRL);
			qede->ucst_mac[i].set = 1;
			qede->ucst_avail--;
		} else {
			/* LINTED E_ARGUMENT_MISMATCH */
			qede_info(qede, "add macaddr failed for addr "
			    "%02x:%02x:%02x:%02x:%02x:%02x",
			    mac_addr[0], mac_addr[1],
			    mac_addr[2], mac_addr[3], mac_addr[4],
			    mac_addr[5]);
		}
	}

	return (ret);
}

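/*
 * GLDv3 mgi_addmac entry point.  arg is the rx group; the request is
 * rejected with ECANCELED while the instance is suspended.
 */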
#ifndef ILLUMOS
static int
qede_add_mac_addr(void *arg, const uint8_t *mac_addr, const uint64_t flags)
#else
static int
qede_add_mac_addr(void *arg, const uint8_t *mac_addr)
#endif
{
	qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
	qede_t *qede = rx_group->qede;
	int ret = DDI_SUCCESS;

	/* LINTED E_ARGUMENT_MISMATCH */
	qede_info(qede, " mac addr :" MAC_STRING, MACTOSTR(mac_addr));

	mutex_enter(&qede->gld_lock);
	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		mutex_exit(&qede->gld_lock);
		return (ECANCELED);
	}
	ret = qede_add_macaddr(qede, (uint8_t *)mac_addr);

	mutex_exit(&qede->gld_lock);

	return (ret);
}

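/*
 * Remove mac_addr from the unicast filter table and from hardware.
 * Removing an address that was never added is treated as success.
 */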
static int
qede_rem_macaddr(qede_t *qede, uint8_t *mac_addr)
{
	int ret = 0;
	int i;

	i = qede_ucst_find(qede, mac_addr);
	if (i == -1) {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede,
		    "mac addr not there to remove " MAC_STRING,
		    MACTOSTR(mac_addr));
		return (0);
	}
	if (qede->ucst_mac[i].set == 0) {
		return (EINVAL);
	}
	ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_REMOVE);
	if (ret == 0) {
		bzero(qede->ucst_mac[i].mac_addr.ether_addr_octet, ETHERADDRL);
		qede->ucst_mac[i].set = 0;
		qede->ucst_avail++;
	} else {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "mac addr remove failed " MAC_STRING,
		    MACTOSTR(mac_addr));
	}
	return (ret);
}

static int
qede_rem_mac_addr(void *arg, const uint8_t *mac_addr)
{
	qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
	qede_t *qede = rx_group->qede;
	int ret = DDI_SUCCESS;

	/* LINTED E_ARGUMENT_MISMATCH */
	qede_info(qede, "mac addr remove:" MAC_STRING, MACTOSTR(mac_addr));
	mutex_enter(&qede->gld_lock);
	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		mutex_exit(&qede->gld_lock);
		return (ECANCELED);
	}
	ret = qede_rem_macaddr(qede, (uint8_t *)mac_addr);
	mutex_exit(&qede->gld_lock);
	return (ret);
}

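/*
 * mri_stat callback for a tx ring: report the per-ring byte and packet
 * counters to the mac layer; anything else returns ENOTSUP.
 */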
static int
qede_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	int ret = 0;

	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_tx_ring_t *tx_ring = fp->tx_ring[0];
	qede_t *qede = fp->qede;

	if (qede->qede_state == QEDE_STATE_SUSPENDED)
		return (ECANCELED);

	switch (stat) {
	case MAC_STAT_OBYTES:
		*val = tx_ring->tx_byte_count;
		break;

	case MAC_STAT_OPACKETS:
		*val = tx_ring->tx_pkt_count;
		break;

	default:
		*val = 0;
		ret = ENOTSUP;
	}

	return (ret);
}

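/*
 * mri_poll callback: while the ring is in polling mode, pull up to
 * poll_bytes/poll_pkts worth of received frames off the fastpath and
 * return them as an mblk chain.
 */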
#ifndef ILLUMOS
static mblk_t *
qede_rx_ring_poll(void *arg, int poll_bytes, int poll_pkts)
{
#else
static mblk_t *
qede_rx_ring_poll(void *arg, int poll_bytes)
{
	/* XXX pick a value at the moment */
	int poll_pkts = 100;
#endif
	qede_fastpath_t *fp = (qede_fastpath_t *)arg;
	mblk_t *mp = NULL;
	int work_done = 0;
	qede_t *qede = fp->qede;

	if (poll_bytes == 0) {
		return (NULL);
	}

	mutex_enter(&fp->fp_lock);
	qede->intrSbPollCnt[fp->vect_info->vect_index]++;

	mp = qede_process_fastpath(fp, poll_bytes, poll_pkts, &work_done);
	if (mp != NULL) {
		fp->rx_ring->rx_poll_cnt++;
	} else if ((mp == NULL) && (work_done == 0)) {
		qede->intrSbPollNoChangeCnt[fp->vect_info->vect_index]++;
	}

	mutex_exit(&fp->fp_lock);
	return (mp);
}

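/*
 * mi_enable/mi_disable callbacks: the mac layer uses these to switch a
 * rx ring between interrupt-driven and polled operation by toggling the
 * hardware interrupt associated with the ring's status block.
 */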
#ifndef ILLUMOS
static int
qede_rx_ring_intr_enable(mac_ring_driver_t rh)
#else
static int
qede_rx_ring_intr_enable(mac_intr_handle_t rh)
#endif
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;

	mutex_enter(&fp->qede->drv_lock);
	if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
		mutex_exit(&fp->qede->drv_lock);
		return (DDI_FAILURE);
	}

	fp->rx_ring->intrEnableCnt++;
	qede_enable_hw_intr(fp);
	fp->disabled_by_poll = 0;
	mutex_exit(&fp->qede->drv_lock);

	return (DDI_SUCCESS);
}

#ifndef ILLUMOS
static int
qede_rx_ring_intr_disable(mac_ring_driver_t rh)
#else
static int
qede_rx_ring_intr_disable(mac_intr_handle_t rh)
#endif
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;

	mutex_enter(&fp->qede->drv_lock);
	if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
		mutex_exit(&fp->qede->drv_lock);
		return (DDI_FAILURE);
	}
	fp->rx_ring->intrDisableCnt++;
	qede_disable_hw_intr(fp);
	fp->disabled_by_poll = 1;
	mutex_exit(&fp->qede->drv_lock);
	return (DDI_SUCCESS);
}

static int
qede_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	int ret = 0;

	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_t *qede = fp->qede;
	qede_rx_ring_t *rx_ring = fp->rx_ring;

	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_RBYTES:
		*val = rx_ring->rx_byte_cnt;
		break;
	case MAC_STAT_IPACKETS:
		*val = rx_ring->rx_pkt_cnt;
		break;
	default:
		*val = 0;
		ret = ENOTSUP;
		break;
	}

	return (ret);
}

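/*
 * Translate a (group index, ring index within that group) pair into a
 * global fastpath index by walking fp_array and counting the rings that
 * belong to the given group.  Returns -1 if no such ring exists.
 */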
static int
qede_get_global_ring_index(qede_t *qede, int gindex, int rindex)
{
	qede_fastpath_t *fp;
	qede_rx_ring_t *rx_ring;
	int i = 0;

	for (i = 0; i < qede->num_fp; i++) {
		fp = &qede->fp_array[i];
		rx_ring = fp->rx_ring;

		if (rx_ring->group_index == gindex) {
			rindex--;
		}
		if (rindex < 0) {
			return (i);
		}
	}

	return (-1);
}

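/*
 * mri_start/mri_stop callbacks: bracket the period during which the mac
 * layer may deliver from this rx ring.  mri_start records the generation
 * number that must accompany every mac_rx_ring() delivery.
 */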
static void
qede_rx_ring_stop(mac_ring_driver_t rh)
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_rx_ring_t *rx_ring = fp->rx_ring;

	qede_print("!%s(%d): called", __func__, fp->qede->instance);
	mutex_enter(&fp->fp_lock);
	rx_ring->mac_ring_started = B_FALSE;
	mutex_exit(&fp->fp_lock);
}

static int
qede_rx_ring_start(mac_ring_driver_t rh, u64 mr_gen_num)
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_rx_ring_t *rx_ring = fp->rx_ring;

	qede_print("!%s(%d): called", __func__, fp->qede->instance);
	mutex_enter(&fp->fp_lock);
	rx_ring->mr_gen_num = mr_gen_num;
	rx_ring->mac_ring_started = B_TRUE;
	rx_ring->intrDisableCnt = 0;
	rx_ring->intrEnableCnt = 0;
	fp->disabled_by_poll = 0;

	mutex_exit(&fp->fp_lock);

	return (DDI_SUCCESS);
}

/* Callback function from mac layer to register rings */
void
qede_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	qede_t *qede = (qede_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * Index passed as a param is the ring index within the
		 * given group index. If multiple groups are supported
		 * then need to search into all groups to find out the
		 * global ring index for the passed group relative
		 * ring index
		 */
		int global_ring_index = qede_get_global_ring_index(qede,
		    group_index, ring_index);
		qede_fastpath_t *fp;
		qede_rx_ring_t *rx_ring;
		int i;

		/*
		 * global_ring_index < 0 means group index passed
		 * was registered by our driver
		 */
		ASSERT(global_ring_index >= 0);

		if (rh == NULL) {
			cmn_err(CE_WARN, "!rx ring(%d) ring handle NULL",
			    global_ring_index);
		}

		fp = &qede->fp_array[global_ring_index];
		rx_ring = fp->rx_ring;
		fp->qede = qede;

		rx_ring->mac_ring_handle = rh;

		qede_info(qede, "rx_ring %d mac_ring_handle %p",
		    rx_ring->rss_id, rh);

		/* mri_driver passed as arg to mac_ring* callbacks */
		infop->mri_driver = (mac_ring_driver_t)fp;
		/*
		 * mri_start callback will supply a mac rings generation
		 * number which is needed while indicating packets
		 * upstream via mac_rx_ring() call
		 */
		infop->mri_start = qede_rx_ring_start;
		infop->mri_stop = qede_rx_ring_stop;
		infop->mri_poll = qede_rx_ring_poll;
		infop->mri_stat = qede_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)fp;
		mintr->mi_enable = qede_rx_ring_intr_enable;
		mintr->mi_disable = qede_rx_ring_intr_disable;
		if (qede->intr_ctx.intr_type_in_use &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    qede->intr_ctx.
			    intr_hdl_array[global_ring_index + qede->num_hwfns];
		}
		break;
	}
	case MAC_RING_TYPE_TX: {
		qede_fastpath_t *fp;
		qede_tx_ring_t *tx_ring;
		int i, tc;

		ASSERT(ring_index < qede->num_fp);

		fp = &qede->fp_array[ring_index];
		fp->qede = qede;
		tx_ring = fp->tx_ring[0];
		tx_ring->mac_ring_handle = rh;
		qede_info(qede, "tx_ring %d mac_ring_handle %p",
		    tx_ring->tx_queue_index, rh);
		infop->mri_driver = (mac_ring_driver_t)fp;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = qede_ring_tx;
		infop->mri_stat = qede_tx_ring_stat;
		if (qede->intr_ctx.intr_type_in_use &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    qede->intr_ctx.
			    intr_hdl_array[ring_index + qede->num_hwfns];
		}
		break;
	}
	default:
		break;
	}
}

/*
 * Callback function from mac layer to register group
 */
void
qede_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	qede_t *qede = (qede_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		qede_mac_group_t *rx_group;

		rx_group = &qede->rx_groups[index];
		rx_group->group_handle = gh;
		rx_group->group_index = index;
		rx_group->qede = qede;
		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
#ifndef ILLUMOS
		infop->mgi_addvlan = NULL;
		infop->mgi_remvlan = NULL;
		infop->mgi_getsriov_info = NULL;
		infop->mgi_setmtu = NULL;
#endif
		infop->mgi_addmac = qede_add_mac_addr;
		infop->mgi_remmac = qede_rem_mac_addr;
		infop->mgi_count = qede->num_fp;
#ifndef ILLUMOS
		if (index == 0) {
			infop->mgi_flags = MAC_GROUP_DEFAULT;
		}
#endif

		break;
	}
	case MAC_RING_TYPE_TX: {
		qede_mac_group_t *tx_group;

		tx_group = &qede->tx_groups[index];
		tx_group->group_handle = gh;
		tx_group->group_index = index;
		tx_group->qede = qede;

		infop->mgi_driver = (mac_group_driver_t)tx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;
		infop->mgi_remmac = NULL;
#ifndef ILLUMOS
		infop->mgi_addvlan = NULL;
		infop->mgi_remvlan = NULL;
		infop->mgi_setmtu = NULL;
		infop->mgi_getsriov_info = NULL;
#endif

		infop->mgi_count = qede->num_fp;

#ifndef ILLUMOS
		if (index == 0) {
			infop->mgi_flags = MAC_GROUP_DEFAULT;
		}
#endif
		break;
	}
	default:
		break;
	}
}

#ifdef ILLUMOS
static int
qede_transceiver_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
{
	qede_t *qede = arg;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt;
	uint32_t transceiver_state;

	if (id >= edev->num_hwfns || arg == NULL || infop == NULL)
		return (EINVAL);

	hwfn = &edev->hwfns[id];
	ptt = ecore_ptt_acquire(hwfn);
	if (ptt == NULL) {
		return (EIO);
	}
	/*
	 * Use the underlying raw API to get this information. While the
	 * ecore_phy routines have some ways of getting to this information, it
	 * ends up writing the raw data as ASCII characters which doesn't help
	 * us one bit.
	 */
	transceiver_state = ecore_rd(hwfn, ptt, hwfn->mcp_info->port_addr +
	    offsetof(struct public_port, transceiver_data));
	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
	ecore_ptt_release(hwfn, ptt);

	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) != 0) {
		mac_transceiver_info_set_present(infop, B_TRUE);
		/*
		 * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
		 * not set, so we cannot rely on it. Instead, we have found that
		 * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
		 * use the transceiver.
		 */
		if ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) != 0) {
			mac_transceiver_info_set_usable(infop, B_FALSE);
		} else {
			mac_transceiver_info_set_usable(infop, B_TRUE);
		}
	} else {
		mac_transceiver_info_set_present(infop, B_FALSE);
		mac_transceiver_info_set_usable(infop, B_FALSE);
	}

	return (0);
}

static int
qede_transceiver_read(void *arg, uint_t id, uint_t page, void *buf,
    size_t nbytes, off_t offset, size_t *nread)
{
	qede_t *qede = arg;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	uint32_t port, lane;
	struct ecore_ptt *ptt;
	enum _ecore_status_t ret;

	if (id >= edev->num_hwfns || buf == NULL || nbytes == 0 ||
	    nread == NULL || (page != 0xa0 && page != 0xa2) || offset < 0)
		return (EINVAL);

	/*
	 * Both supported pages have a length of 256 bytes, ensure nothing asks
	 * us to go beyond that.
	 */
	if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256)) {
		return (EINVAL);
	}

	hwfn = &edev->hwfns[id];
	ptt = ecore_ptt_acquire(hwfn);
	if (ptt == NULL) {
		return (EIO);
	}

	ret = ecore_mcp_phy_sfp_read(hwfn, ptt, hwfn->port_id, page, offset,
	    nbytes, buf);
	ecore_ptt_release(hwfn, ptt);
	if (ret != ECORE_SUCCESS) {
		return (EIO);
	}
	*nread = nbytes;
	return (0);
}
#endif /* ILLUMOS */

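/*
 * mc_getstat entry point: snapshot the firmware's per-vport statistics
 * and the current link configuration, then map the requested mac/ether
 * statistic onto them.
 */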
static int
qede_mac_stats(void *arg, uint_t stat, uint64_t *value)
{
	qede_t *qede = (qede_t *)arg;
	struct ecore_eth_stats vstats;
	struct ecore_dev *edev = &qede->edev;
	struct qede_link_cfg lnkcfg;
	int rc = 0;
	qede_fastpath_t *fp = &qede->fp_array[0];
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_ring;

	if ((qede == NULL) || (value == NULL)) {
		return (EINVAL);
	}

	mutex_enter(&qede->gld_lock);

	if (qede->qede_state != QEDE_STATE_STARTED) {
		mutex_exit(&qede->gld_lock);
		return (EAGAIN);
	}

	*value = 0;

	memset(&vstats, 0, sizeof (struct ecore_eth_stats));
	ecore_get_vport_stats(edev, &vstats);

	memset(&qede->curcfg, 0, sizeof (struct qede_link_cfg));
	qede_get_link_info(&edev->hwfns[0], &qede->curcfg);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*value = (qede->props.link_speed * 1000000ULL);
		break;
	case MAC_STAT_MULTIRCV:
		*value = vstats.common.rx_mcast_pkts;
		break;
	case MAC_STAT_BRDCSTRCV:
		*value = vstats.common.rx_bcast_pkts;
		break;
	case MAC_STAT_MULTIXMT:
		*value = vstats.common.tx_mcast_pkts;
		break;
	case MAC_STAT_BRDCSTXMT:
		*value = vstats.common.tx_bcast_pkts;
		break;
	case MAC_STAT_NORCVBUF:
		*value = vstats.common.no_buff_discards;
		break;
	case MAC_STAT_NOXMTBUF:
		*value = 0;
		break;
	case MAC_STAT_IERRORS:
	case ETHER_STAT_MACRCV_ERRORS:
		*value = vstats.common.mac_filter_discards +
		    vstats.common.packet_too_big_discard +
		    vstats.common.rx_crc_errors;
		break;

	case MAC_STAT_OERRORS:
		break;

	case MAC_STAT_COLLISIONS:
		*value = vstats.bb.tx_total_collisions;
		break;

	case MAC_STAT_RBYTES:
		*value = vstats.common.rx_ucast_bytes +
		    vstats.common.rx_mcast_bytes +
		    vstats.common.rx_bcast_bytes;
		break;

	case MAC_STAT_IPACKETS:
		*value = vstats.common.rx_ucast_pkts +
		    vstats.common.rx_mcast_pkts +
		    vstats.common.rx_bcast_pkts;
		break;

	case MAC_STAT_OBYTES:
		*value = vstats.common.tx_ucast_bytes +
		    vstats.common.tx_mcast_bytes +
		    vstats.common.tx_bcast_bytes;
		break;

	case MAC_STAT_OPACKETS:
		*value = vstats.common.tx_ucast_pkts +
		    vstats.common.tx_mcast_pkts +
		    vstats.common.tx_bcast_pkts;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*value = vstats.common.rx_align_errors;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*value = vstats.common.rx_crc_errors;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		break;

	case ETHER_STAT_DEFER_XMTS:
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		break;

	case ETHER_STAT_EX_COLLISIONS:
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*value = 0;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*value = vstats.common.rx_oversize_packets;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_TOOSHORT_ERRORS:
		*value = vstats.common.rx_undersize_packets;
		break;
#endif

	case ETHER_STAT_XCVR_ADDR:
		*value = 0;
		break;

	case ETHER_STAT_XCVR_ID:
		*value = 0;
		break;

	case ETHER_STAT_XCVR_INUSE:
		*value = (uint64_t)qede_link_to_media(&qede->curcfg,
		    qede->props.link_speed);
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_CAP_10GFDX:
		*value = 0;
		break;
#endif
	case ETHER_STAT_CAP_100FDX:
		*value = 0;
		break;
	case ETHER_STAT_CAP_100HDX:
		*value = 0;
		break;
	case ETHER_STAT_CAP_ASMPAUSE:
		*value = 1;
		break;
	case ETHER_STAT_CAP_PAUSE:
		*value = 1;
		break;
	case ETHER_STAT_CAP_AUTONEG:
		*value = 1;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_CAP_REMFAULT:
		*value = 0;
		break;
#endif

#if (MAC_VERSION > 1)
	case ETHER_STAT_ADV_CAP_10GFDX:
		*value = 0;
		break;
#endif
	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*value = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*value = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*value = qede->curcfg.adv_capab.autoneg;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_ADV_REMFAULT:
		*value = 0;
		break;
#endif

	case ETHER_STAT_LINK_AUTONEG:
		*value = qede->curcfg.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*value = (qede->props.link_duplex == DUPLEX_FULL) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;
	/*
	 * Supported speeds. These indicate what hardware is capable of.
	 */
	case ETHER_STAT_CAP_1000HDX:
		*value = qede->curcfg.supp_capab.param_1000hdx;
		break;

	case ETHER_STAT_CAP_1000FDX:
		*value = qede->curcfg.supp_capab.param_1000fdx;
		break;

	case ETHER_STAT_CAP_10GFDX:
		*value = qede->curcfg.supp_capab.param_10000fdx;
		break;

	case ETHER_STAT_CAP_25GFDX:
		*value = qede->curcfg.supp_capab.param_25000fdx;
		break;

	case ETHER_STAT_CAP_40GFDX:
		*value = qede->curcfg.supp_capab.param_40000fdx;
		break;

	case ETHER_STAT_CAP_50GFDX:
		*value = qede->curcfg.supp_capab.param_50000fdx;
		break;

	case ETHER_STAT_CAP_100GFDX:
		*value = qede->curcfg.supp_capab.param_100000fdx;
		break;

	/*
	 * Advertised speeds. These indicate what the hardware is currently
	 * advertising to the link partner.
	 */
	case ETHER_STAT_ADV_CAP_1000HDX:
		*value = qede->curcfg.adv_capab.param_1000hdx;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*value = qede->curcfg.adv_capab.param_1000fdx;
		break;

	case ETHER_STAT_ADV_CAP_10GFDX:
		*value = qede->curcfg.adv_capab.param_10000fdx;
		break;

	case ETHER_STAT_ADV_CAP_25GFDX:
		*value = qede->curcfg.adv_capab.param_25000fdx;
		break;

	case ETHER_STAT_ADV_CAP_40GFDX:
		*value = qede->curcfg.adv_capab.param_40000fdx;
		break;

	case ETHER_STAT_ADV_CAP_50GFDX:
		*value = qede->curcfg.adv_capab.param_50000fdx;
		break;

	case ETHER_STAT_ADV_CAP_100GFDX:
		*value = qede->curcfg.adv_capab.param_100000fdx;
		break;

	default:
		rc = ENOTSUP;
	}

	mutex_exit(&qede->gld_lock);
	return (rc);
}

/* (flag) TRUE = on, FALSE = off */
static int
qede_mac_promiscuous(void *arg, boolean_t on)
{
	qede_t *qede = (qede_t *)arg;
	int ret = DDI_SUCCESS;
	enum qede_filter_rx_mode_type mode;

	qede_print("!%s(%d): called", __func__, qede->instance);

	mutex_enter(&qede->drv_lock);

	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		ret = ECANCELED;
		goto exit;
	}

	if (on) {
		qede_info(qede, "Entering promiscuous mode");
		mode = QEDE_FILTER_RX_MODE_PROMISC;
		qede->params.promisc_fl = B_TRUE;
	} else {
		qede_info(qede, "Leaving promiscuous mode");
		if (qede->params.multi_promisc_fl == B_TRUE) {
			mode = QEDE_FILTER_RX_MODE_MULTI_PROMISC;
		} else {
			mode = QEDE_FILTER_RX_MODE_REGULAR;
		}
		qede->params.promisc_fl = B_FALSE;
	}

	ret = qede_set_filter_rx_mode(qede, mode);

exit:
	mutex_exit(&qede->drv_lock);
	return (ret);
}

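/*
 * Send a multicast filter command (add/remove, per opcode) to the
 * firmware for the mc_cnt addresses packed back-to-back in mac.
 */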
int
qede_set_rx_mac_mcast(qede_t *qede, enum ecore_filter_opcode opcode,
    uint8_t *mac, int mc_cnt)
{
	struct ecore_filter_mcast cmd;
	int i;

	memset(&cmd, 0, sizeof (cmd));
	cmd.opcode = opcode;
	cmd.num_mc_addrs = mc_cnt;

	for (i = 0; i < mc_cnt; i++, mac += ETH_ALLEN) {
		COPY_ETH_ADDRESS(mac, cmd.mac[i]);
	}

	return (ecore_filter_mcast_cmd(&qede->edev, &cmd,
	    ECORE_SPQ_MODE_CB, NULL));
}

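/*
 * Program the rx/tx accept filters for the requested mode: matched
 * unicast/multicast plus broadcast by default, with unmatched multicast
 * (multi-promisc) or unmatched unicast and multicast (promisc) layered
 * on top as needed.
 */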
int
qede_set_filter_rx_mode(qede_t *qede, enum qede_filter_rx_mode_type type)
{
	struct ecore_filter_accept_flags flg;

	memset(&flg, 0, sizeof (flg));

	flg.update_rx_mode_config = 1;
	flg.update_tx_mode_config = 1;
	flg.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
	flg.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;

	if (type == QEDE_FILTER_RX_MODE_PROMISC)
		flg.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
		    ECORE_ACCEPT_MCAST_UNMATCHED;
	else if (type == QEDE_FILTER_RX_MODE_MULTI_PROMISC)
		flg.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	qede_info(qede, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n",
	    flg.rx_accept_filter, flg.tx_accept_filter, type);
	return (ecore_filter_accept_cmd(&qede->edev, 0, flg,
	    0, /* update_accept_any_vlan */
	    0, /* accept_any_vlan */
	    ECORE_SPQ_MODE_CB, NULL));
}

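/*
 * Maintain the driver's multicast list: flag selects add or remove, and
 * a NULL address with flag clear flushes the entire list.  Once more
 * than 64 addresses are in use the device is switched to
 * multicast-promiscuous mode instead of exact filtering.
 */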
int
qede_multicast(qede_t *qede, boolean_t flag, const uint8_t *ptr_mcaddr)
{
	int i, ret = DDI_SUCCESS;
	qede_mcast_list_entry_t *ptr_mlist;
	qede_mcast_list_entry_t *ptr_entry;
	int mc_cnt;
	unsigned char *mc_macs, *tmpmc;
	size_t size;
	boolean_t mcmac_exists = B_FALSE;
	enum qede_filter_rx_mode_type mode;

	if (!ptr_mcaddr) {
		cmn_err(CE_NOTE, "Removing all multicast");
	} else {
		cmn_err(CE_NOTE,
		    "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
		    qede, (flag) ? "Adding" : "Removing", ptr_mcaddr[0],
		    ptr_mcaddr[1], ptr_mcaddr[2], ptr_mcaddr[3], ptr_mcaddr[4],
		    ptr_mcaddr[5]);
	}

	if (flag && (ptr_mcaddr == NULL)) {
		cmn_err(CE_WARN, "ERROR: Multicast address not specified");
		return (EINVAL);
	}

	/* refuse to add a multicast address beyond the soft limit */
	if (flag && (qede->mc_cnt >= MAX_MC_SOFT_LIMIT)) {
		qede_info(qede, "Cannot add more than MAX_MC_SOFT_LIMIT");
		return (ENOENT);
	}

	size = MAX_MC_SOFT_LIMIT * ETH_ALLEN;

	mc_macs = kmem_zalloc(size, KM_NOSLEEP);
	if (!mc_macs) {
		cmn_err(CE_WARN, "ERROR: Failed to allocate for mc_macs");
		return (EINVAL);
	}

	tmpmc = mc_macs;

	/* remove all multicast - as flag not set and mcaddr not specified */
	if (!flag && (ptr_mcaddr == NULL)) {
		QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
		    &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
		{
			if (ptr_entry != NULL) {
				QEDE_LIST_REMOVE(&ptr_entry->mclist_entry,
				    &qede->mclist.head);
				kmem_free(ptr_entry,
				    sizeof (qede_mcast_list_entry_t) +
				    ETH_ALLEN);
			}
		}

		ret = qede_set_rx_mac_mcast(qede,
		    ECORE_FILTER_REMOVE, mc_macs, 1);
		qede->mc_cnt = 0;
		goto exit;
	}

	QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
	    &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
	{
		if ((ptr_entry != NULL) &&
		    IS_ETH_ADDRESS_EQUAL(ptr_mcaddr, ptr_entry->mac)) {
			mcmac_exists = B_TRUE;
			break;
		}
	}
	if (flag && mcmac_exists) {
		ret = DDI_SUCCESS;
		goto exit;
	} else if (!flag && !mcmac_exists) {
		ret = DDI_SUCCESS;
		goto exit;
	}

	if (flag) {
		ptr_entry = kmem_zalloc((sizeof (qede_mcast_list_entry_t) +
		    ETH_ALLEN), KM_NOSLEEP);
		if (ptr_entry == NULL) {
			ret = ENOMEM;
			goto exit;
		}
		ptr_entry->mac = (uint8_t *)ptr_entry +
		    sizeof (qede_mcast_list_entry_t);
		COPY_ETH_ADDRESS(ptr_mcaddr, ptr_entry->mac);
		QEDE_LIST_ADD(&ptr_entry->mclist_entry, &qede->mclist.head);
	} else {
		QEDE_LIST_REMOVE(&ptr_entry->mclist_entry, &qede->mclist.head);
		kmem_free(ptr_entry, sizeof (qede_mcast_list_entry_t) +
		    ETH_ALLEN);
	}

	mc_cnt = 0;
	QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, &qede->mclist.head,
	    qede_mcast_list_entry_t, mclist_entry) {
		COPY_ETH_ADDRESS(ptr_entry->mac, tmpmc);
		tmpmc += ETH_ALLEN;
		mc_cnt++;
	}
	qede->mc_cnt = mc_cnt;
	if (mc_cnt <= 64) {
		ret = qede_set_rx_mac_mcast(qede, ECORE_FILTER_ADD,
		    (unsigned char *)mc_macs, mc_cnt);
		if ((qede->params.multi_promisc_fl == B_TRUE) &&
		    (qede->params.promisc_fl == B_FALSE)) {
			mode = QEDE_FILTER_RX_MODE_REGULAR;
			ret = qede_set_filter_rx_mode(qede, mode);
		}
		qede->params.multi_promisc_fl = B_FALSE;
	} else {
		if ((qede->params.multi_promisc_fl == B_FALSE) &&
		    (qede->params.promisc_fl == B_FALSE)) {
			ret = qede_set_filter_rx_mode(qede,
			    QEDE_FILTER_RX_MODE_MULTI_PROMISC);
		}
		qede->params.multi_promisc_fl = B_TRUE;
		qede_info(qede, "mode is MULTI_PROMISC");
	}
exit:
	kmem_free(mc_macs, size);
	qede_info(qede, "multicast ret %d mc_cnt %d\n", ret, qede->mc_cnt);
	return (ret);
}

/*
 * This function is used to enable or disable multicast packet reception for
 * particular multicast addresses.
 * (flag) TRUE = add, FALSE = remove
 */
static int
qede_mac_multicast(void *arg, boolean_t flag, const uint8_t *mcast_addr)
{
	qede_t *qede = (qede_t *)arg;
	int ret = DDI_SUCCESS;

	mutex_enter(&qede->gld_lock);
	if (qede->qede_state != QEDE_STATE_STARTED) {
		mutex_exit(&qede->gld_lock);
		return (EAGAIN);
	}
	ret = qede_multicast(qede, flag, mcast_addr);

	mutex_exit(&qede->gld_lock);

	return (ret);
}
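
/*
 * Drop back to regular rx mode and clear every unicast and multicast
 * filter that is currently programmed; used during teardown.
 */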
int
qede_clear_filters(qede_t *qede)
{
	int ret = 0;
	int i;

	if ((qede->params.promisc_fl == B_TRUE) ||
	    (qede->params.multi_promisc_fl == B_TRUE)) {
		ret = qede_set_filter_rx_mode(qede,
		    QEDE_FILTER_RX_MODE_REGULAR);
		if (ret) {
			qede_info(qede,
			    "qede_clear_filters failed to set rx_mode");
		}
	}
	for (i = 0; i < qede->ucst_total; i++) {
		if (qede->ucst_mac[i].set) {
			qede_rem_macaddr(qede,
			    qede->ucst_mac[i].mac_addr.ether_addr_octet);
		}
	}
	qede_multicast(qede, B_FALSE, NULL);
	return (ret);
}

#ifdef NO_CROSSBOW
static int
qede_mac_unicast(void *arg, const uint8_t *mac_addr)
{
	qede_t *qede = (qede_t *)arg;
	return (0);
}

static mblk_t *
qede_mac_tx(void *arg, mblk_t *mblk)
{
	qede_t *qede = (qede_t *)arg;
	qede_fastpath_t *fp = &qede->fp_array[0];

	mblk = qede_ring_tx((void *)fp, mblk);

	return (mblk);
}
#endif /* NO_CROSSBOW */

static lb_property_t loopmodes[] = {
	{ normal, "normal", QEDE_LOOP_NONE },
	{ internal, "internal", QEDE_LOOP_INTERNAL },
	{ external, "external", QEDE_LOOP_EXTERNAL },
};

/*
 * Set Loopback mode
 */
static enum ioc_reply
qede_set_loopback_mode(qede_t *qede, uint32_t mode)
{
	int i = 0;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt = NULL;
	struct ecore_mcp_link_params *link_params;

	hwfn = &edev->hwfns[0];
	link_params = ecore_mcp_get_link_params(hwfn);
	ptt = ecore_ptt_acquire(hwfn);

	switch (mode) {
	default:
		qede_info(qede, "unknown loopback mode !!");
		ecore_ptt_release(hwfn, ptt);
		return (IOC_INVAL);

	case QEDE_LOOP_NONE:
		(void) ecore_mcp_set_link(hwfn, ptt, 0);

		while (qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		i = 0;

		link_params->loopback_mode = ETH_LOOPBACK_NONE;
		qede->loop_back_mode = QEDE_LOOP_NONE;
		(void) ecore_mcp_set_link(hwfn, ptt, 1);
		ecore_ptt_release(hwfn, ptt);

		while (!qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		return (IOC_REPLY);

	case QEDE_LOOP_INTERNAL:
		qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
		    __func__, qede->instance);
		(void) ecore_mcp_set_link(hwfn, ptt, 0);

		while (qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		i = 0;
		link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
		qede->loop_back_mode = QEDE_LOOP_INTERNAL;
		(void) ecore_mcp_set_link(hwfn, ptt, 1);
		ecore_ptt_release(hwfn, ptt);

		while (!qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		return (IOC_REPLY);

	case QEDE_LOOP_EXTERNAL:
		qede_print("!%s(%d) : External loopback mode is not supported",
		    __func__, qede->instance);
		ecore_ptt_release(hwfn, ptt);
		return (IOC_INVAL);
	}
}

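/*
 * Read len bytes (1, 2 or 4) of PCI config space at addr on behalf of
 * the ioctl interface.  The capability-list walk below only locates the
 * PCIe capability; the read itself goes through the config access
 * handle.
 */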
static int
qede_ioctl_pcicfg_rd(qede_t *qede, u32 addr, void *data,
    int len)
{
	uint32_t ret = 0;
	int cap_offset = 0, cap_id = 0, next_cap = 0;
	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;

	cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
	while (cap_offset != 0) {
		/* Check for an invalid PCI read. */
		if (cap_offset == PCI_EINVAL8) {
			return (DDI_FAILURE);
		}
		cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
		if (cap_id == PCI_CAP_ID_PCI_E) {
			/* PCIe expr capab struct found */
			break;
		} else {
			next_cap = pci_config_get8(pci_cfg_handle,
			    cap_offset + 1);
			cap_offset = next_cap;
		}
	}

	switch (len) {
	case 1:
		ret = pci_config_get8(qede->pci_cfg_handle, addr);
		(void) memcpy(data, &ret, sizeof (uint8_t));
		break;
	case 2:
		ret = pci_config_get16(qede->pci_cfg_handle, addr);
		(void) memcpy(data, &ret, sizeof (uint16_t));
		break;
	case 4:
		ret = pci_config_get32(qede->pci_cfg_handle, addr);
		(void) memcpy(data, &ret, sizeof (uint32_t));
		break;
	default:
		cmn_err(CE_WARN, "bad length for pci config read\n");
		return (1);
	}
	return (0);
}

static int
qede_ioctl_pcicfg_wr(qede_t *qede, u32 addr, void *data,
    int len)
{
	uint16_t ret = 0;
	int cap_offset = 0, cap_id = 0, next_cap = 0;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;

	cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
	while (cap_offset != 0) {
		cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
		if (cap_id == PCI_CAP_ID_PCI_E) {
			/* PCIe expr capab struct found */
			break;
		} else {
			next_cap = pci_config_get8(pci_cfg_handle,
			    cap_offset + 1);
			cap_offset = next_cap;
		}
	}

	switch (len) {
	case 1:
		pci_config_put8(qede->pci_cfg_handle, addr,
		    *(uint8_t *)data1->uabc);
		break;
	case 2:
		ret = pci_config_get16(qede->pci_cfg_handle, addr);
		ret = ret | *(uint16_t *)data1->uabc;

		pci_config_put16(qede->pci_cfg_handle, addr,
		    ret);
		break;
	case 4:
		pci_config_put32(qede->pci_cfg_handle, addr,
		    *(uint32_t *)data1->uabc);
		break;

	default:
		return (1);
	}
	return (0);
}

static int
qede_ioctl_rd_wr_reg(qede_t *qede, void *data)
{
	struct ecore_hwfn *p_hwfn;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_ptt *ptt;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
	uint32_t ret = 0;
	uint8_t cmd = (uint8_t)data1->unused1;
	uint32_t addr = data1->off;
	uint32_t val = *(uint32_t *)&data1->uabc[1];
	uint32_t hwfn_index = *(uint32_t *)&data1->uabc[5];
	uint32_t *reg_addr;

	if (hwfn_index >= qede->num_hwfns) {
		cmn_err(CE_WARN, "invalid hwfn index from application\n");
		return (EINVAL);
	}
	p_hwfn = &edev->hwfns[hwfn_index];

	switch (cmd) {
	case QEDE_REG_READ:
		ret = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, addr);
		(void) memcpy(data1->uabc, &ret, sizeof (uint32_t));
		break;

	case QEDE_REG_WRITE:
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt, addr, val);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in register read/write from application\n");
		break;
	}
	return (ret);
}

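/*
 * NVRAM ioctl backend.  Reads stream the flash contents back through the
 * chain of mblks supplied by the caller; writes are accumulated into a
 * driver-side staging buffer (START_NVM_WRITE/ACCUMULATE_NVM_BUF) and
 * then pushed to the management firmware with PUT_FILE_DATA.
 */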
static int
qede_ioctl_rd_wr_nvram(qede_t *qede, mblk_t *mp)
{
	qede_nvram_data_t *data1 = (qede_nvram_data_t *)(mp->b_cont->b_rptr);
	qede_nvram_data_t *data2, *next_data;
	struct ecore_dev *edev = &qede->edev;
	uint32_t hdr_size = 24, bytes_to_copy, copy_len = 0;
	uint32_t copy_len1 = 0;
	uint32_t addr = data1->off;
	uint32_t size = data1->size, i, buf_size;
	uint8_t cmd, cmd2;
	uint8_t *buf, *tmp_buf;
	mblk_t *mp1;

	cmd = (uint8_t)data1->unused1;

	switch (cmd) {
	case QEDE_NVRAM_CMD_READ:
		buf = kmem_zalloc(size, KM_NOSLEEP);
		if (buf == NULL) {
			cmn_err(CE_WARN, "memory allocation failed"
			    " in nvram read ioctl\n");
			return (DDI_FAILURE);
		}
		(void) ecore_mcp_nvm_read(edev, addr, buf, data1->size);

		copy_len = (MBLKL(mp->b_cont)) - hdr_size;
		if (copy_len > size) {
			(void) memcpy(data1->uabc, buf, size);
			kmem_free(buf, size);
			break;
		}
		(void) memcpy(data1->uabc, buf, copy_len);
		bytes_to_copy = size - copy_len;
		tmp_buf = ((uint8_t *)buf) + copy_len;
		copy_len1 = copy_len;
		mp1 = mp->b_cont;
		mp1 = mp1->b_cont;

		while (mp1) {
			copy_len = MBLKL(mp1);
			if (mp1->b_cont == NULL) {
				copy_len = MBLKL(mp1) - 4;
			}
			data2 = (qede_nvram_data_t *)mp1->b_rptr;
			if (copy_len > bytes_to_copy) {
				(void) memcpy(data2->uabc, tmp_buf,
				    bytes_to_copy);
				kmem_free(buf, size);
				break;
			}
			(void) memcpy(data2->uabc, tmp_buf, copy_len);
			tmp_buf = tmp_buf + copy_len;
			copy_len += copy_len;
			mp1 = mp1->b_cont;
			bytes_to_copy = bytes_to_copy - copy_len;
		}

		kmem_free(buf, size);
		break;

	case QEDE_NVRAM_CMD_WRITE:
		cmd2 = (uint8_t)data1->cmd2;
		size = data1->size;
		addr = data1->off;
		buf_size = size;

		switch (cmd2) {
		case START_NVM_WRITE:
			buf = kmem_zalloc(size, KM_NOSLEEP);
			if (buf == NULL) {
				cmn_err(CE_WARN,
				    "memory allocation failed in "
				    "START_NVM_WRITE\n");
				return (DDI_FAILURE);
			}
			qede->nvm_buf_size = data1->size;
			qede->nvm_buf_start = buf;
			cmn_err(CE_NOTE,
			    "buf = %p, size = %x\n", qede->nvm_buf_start, size);
			qede->nvm_buf = buf;
			qede->copy_len = 0;
			break;

		case ACCUMULATE_NVM_BUF:
			tmp_buf = qede->nvm_buf;
			copy_len = MBLKL(mp->b_cont) - hdr_size;
			if (copy_len > buf_size) {
				if (buf_size < qede->nvm_buf_size) {
					(void) memcpy(tmp_buf, data1->uabc,
					    buf_size);
					qede->copy_len = qede->copy_len +
					    buf_size;
				} else {
					(void) memcpy(tmp_buf,
					    data1->uabc, qede->nvm_buf_size);
					qede->copy_len =
					    qede->copy_len +
					    qede->nvm_buf_size;
				}
				tmp_buf = tmp_buf + buf_size;
				qede->nvm_buf = tmp_buf;
				cmn_err(CE_NOTE,
				    "buf_size from app = %x\n", copy_len);
				break;
			}
			(void) memcpy(tmp_buf, data1->uabc, copy_len);
			tmp_buf = tmp_buf + copy_len;
			bytes_to_copy = buf_size - copy_len;
			mp1 = mp->b_cont;
			mp1 = mp1->b_cont;
			copy_len1 = copy_len;

			while (mp1) {
				copy_len = MBLKL(mp1);
				if (mp1->b_cont == NULL) {
					copy_len = MBLKL(mp1) - 4;
				}
				next_data = (qede_nvram_data_t *)mp1->b_rptr;
				if (copy_len > bytes_to_copy) {
					(void) memcpy(tmp_buf,
					    next_data->uabc, bytes_to_copy);
					qede->copy_len = qede->copy_len +
					    bytes_to_copy;
					break;
				}
				(void) memcpy(tmp_buf, next_data->uabc,
				    copy_len);
				qede->copy_len = qede->copy_len + copy_len;
				tmp_buf = tmp_buf + copy_len;
				copy_len = copy_len1 + copy_len;
				bytes_to_copy = bytes_to_copy - copy_len;
				mp1 = mp1->b_cont;
			}
			qede->nvm_buf = tmp_buf;
			break;

		case STOP_NVM_WRITE:
			break;
		case READ_BUF:
			tmp_buf = (uint8_t *)qede->nvm_buf_start;
			for (i = 0; i < size; i++) {
				cmn_err(CE_NOTE,
				    "buff (%d) : %d\n", i, *tmp_buf);
				tmp_buf++;
			}
			break;
		}
		break;
	case QEDE_NVRAM_CMD_PUT_FILE_DATA:
		tmp_buf = qede->nvm_buf_start;
		(void) ecore_mcp_nvm_write(edev, ECORE_PUT_FILE_DATA,
		    addr, tmp_buf, size);
		kmem_free(qede->nvm_buf_start, size);
		cmn_err(CE_NOTE, "total size = %x, copied size = %x\n",
		    qede->nvm_buf_size, qede->copy_len);
		tmp_buf = NULL;
		qede->nvm_buf = NULL;
		qede->nvm_buf_start = NULL;
		break;

	case QEDE_NVRAM_CMD_SET_SECURE_MODE:
		(void) ecore_mcp_nvm_set_secure_mode(edev, addr);
		break;

	case QEDE_NVRAM_CMD_DEL_FILE:
		(void) ecore_mcp_nvm_del_file(edev, addr);
		break;

	case QEDE_NVRAM_CMD_PUT_FILE_BEGIN:
		(void) ecore_mcp_nvm_put_file_begin(edev, addr);
		break;

	case QEDE_NVRAM_CMD_GET_NVRAM_RESP:
		buf = kmem_zalloc(size, KM_SLEEP);
		(void) ecore_mcp_nvm_resp(edev, buf);
		(void) memcpy(data1->uabc, buf, size);
		kmem_free(buf, size);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in NVRAM read/write from application\n");
		break;
	}
	return (DDI_SUCCESS);
}

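/*
 * Gather link status, speed/pause capabilities, and link-partner
 * abilities from the leading hwfn and marshal them into the
 * qede_func_info_t returned to the ioctl caller.
 */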
static int
qede_get_func_info(qede_t *qede, void *data)
{
	qede_link_output_t link_op;
	qede_func_info_t func_info;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;

	hwfn = &edev->hwfns[0];

	if (hwfn == NULL) {
		cmn_err(CE_WARN, "(%s) : cannot acquire hwfn\n",
		    __func__);
		return (DDI_FAILURE);
	}
	memcpy(&params, &hwfn->mcp_info->link_input, sizeof (params));
	memcpy(&link, &hwfn->mcp_info->link_output, sizeof (link));

	memset(&link_op, 0, sizeof (link_op));
	if (link.link_up) {
		link_op.link_up = true;
	}

	link_op.supported_caps = SUPPORTED_FIBRE;
	if (params.speed.autoneg) {
		link_op.supported_caps |= SUPPORTED_Autoneg;
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx)) {
		link_op.supported_caps |= SUPPORTED_Asym_Pause;
	}

	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx) {
		link_op.supported_caps |= SUPPORTED_Pause;
	}

	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
		link_op.supported_caps |= SUPPORTED_1000baseT_Half |
		    SUPPORTED_1000baseT_Full;
	}

	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
		link_op.supported_caps |= SUPPORTED_10000baseKR_Full;
	}

	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_LINK_SPEED_40G) {
		link_op.supported_caps |= SUPPORTED_40000baseLR4_Full;
	}

	link_op.advertised_caps = link_op.supported_caps;

	if (link.link_up) {
		link_op.speed = link.speed;
	} else {
		link_op.speed = 0;
	}

	link_op.duplex = DUPLEX_FULL;
	link_op.port = PORT_FIBRE;

	link_op.autoneg = params.speed.autoneg;

	/* Link partner capabilities */
	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_1G_HD) {
		link_op.lp_caps |= SUPPORTED_1000baseT_Half;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_1G_FD) {
		link_op.lp_caps |= SUPPORTED_1000baseT_Full;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_10G) {
		link_op.lp_caps |= SUPPORTED_10000baseKR_Full;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_20G) {
		link_op.lp_caps |= SUPPORTED_20000baseKR2_Full;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_40G) {
		link_op.lp_caps |= SUPPORTED_40000baseLR4_Full;
	}

	if (link.an_complete) {
		link_op.lp_caps |= SUPPORTED_Autoneg;
	}

	if (link.partner_adv_pause) {
		link_op.lp_caps |= SUPPORTED_Pause;
	}

	if (link.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
		link_op.lp_caps |= SUPPORTED_Asym_Pause;
	}

	func_info.supported = link_op.supported_caps;
	func_info.advertising = link_op.advertised_caps;
	func_info.speed = link_op.speed;
	func_info.duplex = link_op.duplex;
	func_info.port = qede->pci_func & 0x1;
	func_info.autoneg = link_op.autoneg;

	(void) memcpy(data1->uabc, &func_info, sizeof (qede_func_info_t));

	return (0);
}

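/*
 * Dispatch a QEDE_CMD ioctl: decode the embedded sub-command from the
 * first mblk, hand it to the matching helper above, then ack the
 * message back to the stream with the helper's status.
 */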
static int
qede_do_ioctl(qede_t *qede, queue_t *q, mblk_t *mp)
{
	qede_ioctl_data_t *up_data;
	qede_driver_info_t driver_info;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt = NULL;
	struct mcp_file_att attrib;
	uint32_t flash_size;
	uint32_t mcp_resp, mcp_param, txn_size;
	uint32_t cmd, size, ret = 0;
	uint64_t off;
	mblk_t *mp1 = mp;
	char mac_addr[32];

	up_data = (qede_ioctl_data_t *)(mp->b_cont->b_rptr);

	cmd = up_data->cmd;
	off = up_data->off;
	size = up_data->size;

	switch (cmd) {
	case QEDE_DRV_INFO:
		hwfn = &edev->hwfns[0];
		ptt = ecore_ptt_acquire(hwfn);

		snprintf(driver_info.drv_name, MAX_QEDE_NAME_LEN, "%s",
		    "qede");
		snprintf(driver_info.drv_version, QEDE_STR_SIZE,
		    "v:%s", qede->version);
		snprintf(driver_info.mfw_version, QEDE_STR_SIZE,
		    "%s", qede->versionMFW);
		snprintf(driver_info.stormfw_version, QEDE_STR_SIZE,
		    "%s", qede->versionFW);
		snprintf(driver_info.bus_info, QEDE_STR_SIZE,
		    "%s", qede->bus_dev_func);

		/*
		 * calling ecore_mcp_nvm_rd_cmd to find the flash length, i.e.
		 * 0x08 is equivalent of NVM_TYPE_MFW_TRACE1
		 */
		ecore_mcp_get_flash_size(hwfn, ptt, &flash_size);
		driver_info.eeprom_dump_len = flash_size;
		(void) memcpy(up_data->uabc, &driver_info,
		    sizeof (qede_driver_info_t));
		up_data->size = sizeof (qede_driver_info_t);

		ecore_ptt_release(hwfn, ptt);
		break;

	case QEDE_RD_PCICFG:
		ret = qede_ioctl_pcicfg_rd(qede, off, up_data->uabc, size);
		break;

	case QEDE_WR_PCICFG:
		ret = qede_ioctl_pcicfg_wr(qede, off, up_data, size);
		break;

	case QEDE_RW_REG:
		ret = qede_ioctl_rd_wr_reg(qede, (void *)up_data);
		break;

	case QEDE_RW_NVRAM:
		ret = qede_ioctl_rd_wr_nvram(qede, mp1);
		break;

	case QEDE_FUNC_INFO:
		ret = qede_get_func_info(qede, (void *)up_data);
		break;

	case QEDE_MAC_ADDR:
		snprintf(mac_addr, sizeof (mac_addr),
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    qede->ether_addr[0], qede->ether_addr[1],
		    qede->ether_addr[2], qede->ether_addr[3],
		    qede->ether_addr[4], qede->ether_addr[5]);
		(void) memcpy(up_data->uabc, &mac_addr, sizeof (mac_addr));
		break;

	}
	miocack(q, mp, (sizeof (qede_ioctl_data_t)), ret);
	return (IOC_REPLY);
}

static void
qede_ioctl(qede_t *qede, int cmd, queue_t *q, mblk_t *mp)
{
	switch (cmd) {
	case QEDE_CMD:
		(void) qede_do_ioctl(qede, q, mp);
		break;
	default:
		cmn_err(CE_WARN, "qede ioctl command %x not supported\n",
		    cmd);
		break;
	}
}

enum ioc_reply
qede_loopback_ioctl(qede_t *qede, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	lb_info_sz_t *lb_info_size;
	lb_property_t *lb_prop;
	uint32_t *lb_mode;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL) {
		return (IOC_INVAL);
	}

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		qede_print("!%s(%d): unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		return (IOC_INVAL);
	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			qede_info(qede, "error: ioc_count %d, sizeof %d",
			    iocp->ioc_count, sizeof (lb_info_sz_t));
			return (IOC_INVAL);
		}
		lb_info_size = (void *)mp->b_cont->b_rptr;
		*lb_info_size = sizeof (loopmodes);
		return (IOC_REPLY);
	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			qede_info(qede, "error: iocp->ioc_count %d, sizeof %d",
			    iocp->ioc_count, sizeof (loopmodes));
			return (IOC_INVAL);
		}
		lb_prop = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lb_prop, sizeof (loopmodes));
		return (IOC_REPLY);
	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
			    iocp->ioc_count, sizeof (uint32_t));
			return (IOC_INVAL);
		}
		lb_mode = (void *)mp->b_cont->b_rptr;
		*lb_mode = qede->loop_back_mode;
		return (IOC_REPLY);
	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
			    iocp->ioc_count, sizeof (uint32_t));
			return (IOC_INVAL);
		}
		lb_mode = (void *)mp->b_cont->b_rptr;
		return (qede_set_loopback_mode(qede, *lb_mode));
	}
}

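/*
 * mc_ioctl entry point: validate driver state, enforce privilege for
 * state-changing commands, then route the message to the loopback or
 * QEDE_CMD handlers above.
 */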
static void
qede_mac_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err, cmd;
	qede_t *qede = (qede_t *)arg;
	struct iocblk *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	enum ioc_reply status = IOC_DONE;
	boolean_t need_privilege = B_TRUE;

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	mutex_enter(&qede->drv_lock);
	if ((qede->qede_state == QEDE_STATE_SUSPENDING) ||
	    (qede->qede_state == QEDE_STATE_SUSPENDED)) {
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (cmd) {
	case QEDE_CMD:
		break;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHROUGH */
	case LB_SET_MODE:
		break;
	default:
		qede_print("!%s(%d) unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(&qede->drv_lock);
		return;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err) {
			qede_info(qede, "secpolicy() failed");
			miocnak(wq, mp, 0, err);
			mutex_exit(&qede->drv_lock);
			return;
		}
	}

	switch (cmd) {
	default:
		qede_print("!%s(%d) : unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		status = IOC_INVAL;
		mutex_exit(&qede->drv_lock);
		return;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = qede_loopback_ioctl(qede, wq, mp, iocp);
		break;
	case QEDE_CMD:
		qede_ioctl(qede, cmd, wq, mp);
		status = IOC_DONE;
		break;
	}

	switch (status) {
	default:
		qede_print("!%s(%d) : invalid status from ioctl",
		    __func__, qede->instance);
		break;
	case IOC_DONE:
		/*
		 * OK, Reply already sent
		 */
		break;
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	case IOC_INVAL:
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		return;
	}
	mutex_exit(&qede->drv_lock);
}

2027 extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf;
2028 extern ddi_dma_attr_t qede_dma_attr_rxbuf;
2029 extern ddi_dma_attr_t qede_dma_attr_desc;
2030
2031 static boolean_t
2032 qede_mac_get_capability(void *arg,
2033 mac_capab_t capability,
2034 void * cap_data)
2035 {
2036 qede_t * qede = (qede_t *)arg;
2037 uint32_t *txflags = cap_data;
2038 boolean_t ret = B_FALSE;
2039
2040 switch (capability) {
2041 case MAC_CAPAB_HCKSUM: {
2042 u32 *tx_flags = cap_data;
2043 /*
2044 * Check if checksum is enabled on
2045 * tx and advertise the cksum capab
2046 * to mac layer accordingly. On Rx
2047 * side checksummed packets are
2048 * reveiced anyway
2049 */
2050 qede_info(qede, "%s tx checksum offload",
2051 (qede->checksum == DEFAULT_CKSUM_OFFLOAD) ?
2052 "Enabling":
2053 "Disabling");
2054
2055 if (qede->checksum != DEFAULT_CKSUM_OFFLOAD) {
2056 ret = B_FALSE;
2057 break;
2058 }
2059 /*
2060 * Hardware does not support ICMPv6 checksumming. Right now the
2061 * GLDv3 doesn't provide us a way to specify that we don't
2062 * support that. As such, we cannot indicate
2063 * HCKSUM_INET_FULL_V6.
2064 */
2065
2066 *tx_flags = HCKSUM_INET_FULL_V4 |
2067 HCKSUM_IPHDRCKSUM;
2068 ret = B_TRUE;
2069 break;
2070 }
2071 case MAC_CAPAB_LSO: {
2072 mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;
2073
2074 qede_info(qede, "%s large segmentation offload",
2075 qede->lso_enable ? "Enabling": "Disabling");
2076 if (qede->lso_enable) {
2077 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2078 cap_lso->lso_basic_tcp_ipv4.lso_max = QEDE_LSO_MAXLEN;
2079 ret = B_TRUE;
2080 }
2081 break;
2082 }
	case MAC_CAPAB_RINGS: {
#ifndef NO_CROSSBOW
		mac_capab_rings_t *cap_rings = cap_data;
#ifndef ILLUMOS
		cap_rings->mr_version = MAC_RINGS_VERSION_1;
#endif

		/*
		 * Advertise a single static rx group containing
		 * qede->num_fp rings, and qede->num_fp ungrouped tx rings.
		 */
		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
#ifndef ILLUMOS
			cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
#endif
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = qede->num_fp;
			cap_rings->mr_gnum = 1;
			cap_rings->mr_rget = qede_fill_ring;
			cap_rings->mr_gget = qede_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;
#ifndef ILLUMOS
			cap_rings->mr_ggetringtc = NULL;
#endif
			ret = B_TRUE;
			break;
		case MAC_RING_TYPE_TX:
#ifndef ILLUMOS
			cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
#endif
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = qede->num_fp;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = qede_fill_ring;
			cap_rings->mr_gget = qede_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;
#ifndef ILLUMOS
			cap_rings->mr_ggetringtc = NULL;
#endif
			ret = B_TRUE;
			break;
		default:
			ret = B_FALSE;
			break;
		}
#endif /* NO_CROSSBOW */
		break;
	}
#ifdef ILLUMOS
	case MAC_CAPAB_TRANSCEIVER: {
		mac_capab_transceiver_t *mct = cap_data;

		mct->mct_flags = 0;
		mct->mct_ntransceivers = qede->edev.num_hwfns;
		mct->mct_info = qede_transceiver_info;
		mct->mct_read = qede_transceiver_read;

		ret = B_TRUE;
		break;
	}
#endif
	default:
		break;
	}

	return (ret);
}

int
qede_configure_link(qede_t *qede, bool op);

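/*
 * GLDv3 mc_setprop(9E) entry point. Note that only two MTU values are
 * accepted (DEFAULT_MTU and DEFAULT_JUMBO_MTU), and the MTU can only be
 * changed while the interface is stopped.
 */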
static int
qede_mac_set_property(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	qede_t *qede = (qede_t *)arg;
	struct ecore_mcp_link_params *link_params;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	int ret_val = 0;
	uint32_t option;

	mutex_enter(&qede->gld_lock);
	switch (pr_num) {
	case MAC_PROP_MTU:
		bcopy(pr_val, &option, sizeof (option));

		if (option == qede->mtu) {
			ret_val = 0;
			break;
		}
		if ((option != DEFAULT_JUMBO_MTU) &&
		    (option != DEFAULT_MTU)) {
			ret_val = EINVAL;
			break;
		}
		if (qede->qede_state == QEDE_STATE_STARTED) {
			ret_val = EBUSY;
			break;
		}

		/* Report the new max SDU, not the current one, to mac. */
		ret_val = mac_maxsdu_update(qede->mac_handle, option);
		if (ret_val == 0) {
			qede->mtu = option;
			if (option == DEFAULT_JUMBO_MTU) {
				qede->jumbo_enable = B_TRUE;
			} else {
				qede->jumbo_enable = B_FALSE;
			}

			hwfn = ECORE_LEADING_HWFN(edev);
			hwfn->hw_info.mtu = qede->mtu;
			ret_val = ecore_mcp_ov_update_mtu(hwfn,
			    hwfn->p_main_ptt,
			    hwfn->hw_info.mtu);
			if (ret_val != ECORE_SUCCESS) {
				qede_print("!%s(%d): MTU change %d option %d"
				    " FAILED", __func__, qede->instance,
				    qede->mtu, option);
				break;
			}
			qede_print("!%s(%d): MTU changed %d MTU option"
			    " %d hwfn %d", __func__, qede->instance,
			    qede->mtu, option, hwfn->hw_info.mtu);
		}
		break;

	case MAC_PROP_EN_10GFDX_CAP:
		hwfn = &edev->hwfns[0];
		link_params = ecore_mcp_get_link_params(hwfn);
		if (*(uint8_t *)pr_val) {
			link_params->speed.autoneg = 0;
			link_params->speed.forced_speed = 10000;
			link_params->speed.advertised_speeds =
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
			qede->forced_speed_10G = *(uint8_t *)pr_val;
		} else {
			memcpy(link_params,
			    &qede->link_input_params.default_link_params,
			    sizeof (struct ecore_mcp_link_params));
			qede->forced_speed_10G = *(uint8_t *)pr_val;
		}
		if (qede->qede_state == QEDE_STATE_STARTED) {
			qede_configure_link(qede, true);
		} else {
			mutex_exit(&qede->gld_lock);
			return (0);
		}
		break;
	default:
		ret_val = ENOTSUP;
		break;
	}
	mutex_exit(&qede->gld_lock);
	return (ret_val);
}

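/*
 * GLDv3 mc_stop(9E) entry point: quiesce the device. The link state is
 * reported as unknown once the hardware is stopped.
 */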
static void
qede_mac_stop(void *arg)
{
	qede_t *qede = (qede_t *)arg;
	int status;

	qede_print("!%s(%d): called", __func__, qede->instance);
	mutex_enter(&qede->drv_lock);
	status = qede_stop(qede);
	if (status != DDI_SUCCESS) {
		qede_print("!%s(%d): qede_stop FAILED",
		    __func__, qede->instance);
	}

	mac_link_update(qede->mac_handle, LINK_STATE_UNKNOWN);
	mutex_exit(&qede->drv_lock);
}

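/*
 * GLDv3 mc_start(9E) entry point: bring the hardware to a fully started
 * state. mutex_tryenter() is used so that a contended start fails with
 * EAGAIN rather than blocking.
 */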
static int
qede_mac_start(void *arg)
{
	qede_t *qede = (qede_t *)arg;
	int status;

	qede_print("!%s(%d): called", __func__, qede->instance);
	if (!mutex_tryenter(&qede->drv_lock)) {
		return (EAGAIN);
	}

	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		mutex_exit(&qede->drv_lock);
		return (ECANCELED);
	}

	status = qede_start(qede);
	if (status != DDI_SUCCESS) {
		mutex_exit(&qede->drv_lock);
		return (EIO);
	}

	mutex_exit(&qede->drv_lock);

#ifdef DBLK_DMA_PREMAP
	qede->pm_handle = mac_pmh_tx_get(qede->mac_handle);
#endif
	return (0);
}

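/*
 * GLDv3 mc_getprop(9E) entry point. Link-related properties are derived
 * from the current link configuration, read from the first hwfn.
 */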
static int
qede_mac_get_property(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	qede_t *qede = (qede_t *)arg;
	struct ecore_dev *edev = &qede->edev;
	link_state_t link_state;
	link_duplex_t link_duplex;
	uint64_t link_speed;
	link_flowctrl_t link_flowctrl;
	struct qede_link_cfg link_cfg;
	mac_ether_media_t media;

	memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
	qede_get_link_info(&edev->hwfns[0], &link_cfg);

	switch (pr_num) {
	case MAC_PROP_MTU:
		ASSERT(pr_valsize >= sizeof (uint32_t));
		bcopy(&qede->mtu, pr_val, sizeof (uint32_t));
		break;

	case MAC_PROP_DUPLEX:
		ASSERT(pr_valsize >= sizeof (link_duplex_t));
		link_duplex = (qede->props.link_duplex) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		bcopy(&link_duplex, pr_val, sizeof (link_duplex_t));
		break;

	case MAC_PROP_SPEED:
		ASSERT(pr_valsize >= sizeof (link_speed));
		link_speed = (qede->props.link_speed * 1000000ULL);
		bcopy(&link_speed, pr_val, sizeof (link_speed));
		break;

	case MAC_PROP_STATUS:
		ASSERT(pr_valsize >= sizeof (link_state_t));
		link_state = (qede->params.link_state) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
		bcopy(&link_state, pr_val, sizeof (link_state_t));
		qede_info(qede, "mac_prop_status %d\n", link_state);
		break;

	case MAC_PROP_MEDIA:
		ASSERT(pr_valsize >= sizeof (mac_ether_media_t));
		media = qede_link_to_media(&link_cfg, qede->props.link_speed);
		bcopy(&media, pr_val, sizeof (mac_ether_media_t));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = link_cfg.autoneg;
		break;

	case MAC_PROP_FLOWCTRL:
		ASSERT(pr_valsize >= sizeof (link_flowctrl_t));

		/*
		 * illumos does not have the notion of LINK_FLOWCTRL_AUTO
		 * at this time.
		 */
#ifndef ILLUMOS
		if (link_cfg.pause_cfg & QEDE_LINK_PAUSE_AUTONEG_ENABLE) {
			link_flowctrl = LINK_FLOWCTRL_AUTO;
		}
#endif

		/*
		 * Map the rx/tx pause configuration onto the mac layer's
		 * flow control values:
		 *	rx off, tx off	-> LINK_FLOWCTRL_NONE
		 *	rx on,  tx off	-> LINK_FLOWCTRL_RX
		 *	rx off, tx on	-> LINK_FLOWCTRL_TX
		 *	rx on,  tx on	-> LINK_FLOWCTRL_BI
		 */
		if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_NONE;
		}
		if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_RX;
		}
		if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_TX;
		}
		if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_BI;
		}

		bcopy(&link_flowctrl, pr_val, sizeof (link_flowctrl_t));
		break;

	case MAC_PROP_ADV_10GFDX_CAP:
		*(uint8_t *)pr_val = link_cfg.adv_capab.param_10000fdx;
		break;

	case MAC_PROP_EN_10GFDX_CAP:
		*(uint8_t *)pr_val = qede->forced_speed_10G;
		break;

	case MAC_PROP_PRIVATE:
	default:
		return (ENOTSUP);
	}

	return (0);
}

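/*
 * GLDv3 mc_propinfo(9E) entry point: report the permissions, defaults
 * and valid ranges for each supported property.
 */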
static void
qede_mac_property_info(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	qede_t *qede = (qede_t *)arg;
	qede_link_props_t *def_cfg = &qede_def_link_props;
	link_flowctrl_t link_flowctrl;

	switch (pr_num) {
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, def_cfg->autoneg);
		break;

	case MAC_PROP_FLOWCTRL:
		if (!def_cfg->pause) {
			link_flowctrl = LINK_FLOWCTRL_NONE;
		} else {
			link_flowctrl = LINK_FLOWCTRL_BI;
		}
		mac_prop_info_set_default_link_flowctrl(prh, link_flowctrl);
		break;

	case MAC_PROP_EN_10GFDX_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
		break;

	case MAC_PROP_ADV_10GFDX_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	default:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;
	}
}

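/*
 * The mac_callbacks_t initializer below is positional; the slot comments
 * name the corresponding members. mc_unicst and mc_tx are left NULL
 * because unicast addresses and transmit are handled through the rings
 * capability instead.
 */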
static mac_callbacks_t qede_callbacks = {
	(
	    MC_IOCTL |
	    MC_SETPROP |
	    MC_GETPROP |
	    MC_PROPINFO |
	    MC_GETCAPAB
	),
	qede_mac_stats,			/* mc_getstat */
	qede_mac_start,			/* mc_start */
	qede_mac_stop,			/* mc_stop */
	qede_mac_promiscuous,		/* mc_setpromisc */
	qede_mac_multicast,		/* mc_multicst */
	NULL,				/* mc_unicst */
#ifndef NO_CROSSBOW
	NULL,				/* mc_tx (rings capable) */
#else
	qede_mac_tx,			/* mc_tx */
#endif
	NULL,				/* mc_reserved */
	qede_mac_ioctl,			/* mc_ioctl */
	qede_mac_get_capability,	/* mc_getcapab */
	NULL,				/* mc_open */
	NULL,				/* mc_close */
	qede_mac_set_property,		/* mc_setprop */
	qede_mac_get_property,		/* mc_getprop */
#ifdef MC_PROPINFO
	qede_mac_property_info		/* mc_propinfo */
#endif
};

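/*
 * Register this driver instance with the GLDv3 framework. The
 * mac_register_t is only needed for the duration of mac_register(), so
 * it is freed before returning regardless of the outcome.
 */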
boolean_t
qede_gld_init(qede_t *qede)
{
	int status;
	mac_register_t *macp;

	macp = mac_alloc(MAC_VERSION);
	if (macp == NULL) {
		cmn_err(CE_NOTE, "%s: mac_alloc() failed\n", __func__);
		return (B_FALSE);
	}

	macp->m_driver = qede;
	macp->m_dip = qede->dip;
	macp->m_instance = qede->instance;
	macp->m_priv_props = NULL;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_src_addr = qede->ether_addr;
	macp->m_callbacks = &qede_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = qede->mtu;
	macp->m_margin = VLAN_TAGSZ;
#ifdef ILLUMOS
	macp->m_v12n = MAC_VIRT_LEVEL1;
#endif

	status = mac_register(macp, &qede->mac_handle);
	if (status != 0) {
		cmn_err(CE_NOTE, "%s: mac_register() failed\n", __func__);
	}

	mac_free(macp);

	return (status == 0 ? B_TRUE : B_FALSE);
}

boolean_t
qede_gld_fini(qede_t *qede)
{
	/* Nothing to tear down here at present. */
	return (B_TRUE);
}

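/*
 * Called from the driver's link handling to push link state transitions
 * up to the mac layer.
 */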
void
qede_link_update(qede_t *qede, link_state_t state)
{
	mac_link_update(qede->mac_handle, state);
}