1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * Copyright 2018 Joyent, Inc.
25 * Copyright 2025 Oxide Computer Company
26 */
27
28 #include "qede.h"
29
/*
 * Convenience wrappers serializing access to a fastpath instance via its
 * fp_lock.
 * NOTE(review): the expansions carry a trailing ';', so call sites are
 * presumably written without one — confirm all uses before converting
 * these to the conventional do { } while (0) form.
 */
#define FP_LOCK(ptr) \
	mutex_enter(&ptr->fp_lock);
#define FP_UNLOCK(ptr) \
	mutex_exit(&ptr->fp_lock);
34
35 int
qede_ucst_find(qede_t * qede,const uint8_t * mac_addr)36 qede_ucst_find(qede_t *qede, const uint8_t *mac_addr)
37 {
38 int slot;
39
40 for(slot = 0; slot < qede->ucst_total; slot++) {
41 if (bcmp(qede->ucst_mac[slot].mac_addr.ether_addr_octet,
42 mac_addr, ETHERADDRL) == 0) {
43 return (slot);
44 }
45 }
46 return (-1);
47
48 }
49
50 static int
qede_set_mac_addr(qede_t * qede,uint8_t * mac_addr,uint8_t fl)51 qede_set_mac_addr(qede_t *qede, uint8_t *mac_addr, uint8_t fl)
52 {
53 struct ecore_filter_ucast params;
54
55 memset(¶ms, 0, sizeof (params));
56
57 params.opcode = fl;
58 params.type = ECORE_FILTER_MAC;
59 params.is_rx_filter = true;
60 params.is_tx_filter = true;
61 COPY_ETH_ADDRESS(mac_addr, params.mac);
62
63 return (ecore_filter_ucast_cmd(&qede->edev,
64 ¶ms, ECORE_SPQ_MODE_EBLOCK, NULL));
65
66
67 }
/*
 * Add a unicast MAC address: find a free slot in the driver's unicast
 * table, program the address into firmware, and record it in the slot.
 * Returns 0 if the address was already present or was added, ENOSPC if
 * the table is full, otherwise the ecore command status.
 *
 * After the first successful unicast add (ucst_avail drops to
 * ucst_total - 1) the broadcast address is also programmed into the
 * next free slot so broadcast reception works alongside the filter.
 */
static int
qede_add_macaddr(qede_t *qede, uint8_t *mac_addr)
{
	int i, ret = 0;

	i = qede_ucst_find(qede, mac_addr);
	if (i != -1) {
		/* Address already programmed; treat as success. */
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "mac addr already added %d\n",
		    qede->ucst_avail);
		return (0);
	}
	if (qede->ucst_avail == 0) {
		qede_info(qede, "add macaddr ignored \n");
		return (ENOSPC);
	}
	/* Find the first slot that has never been programmed. */
	for (i = 0; i < qede->ucst_total; i++) {
		if (qede->ucst_mac[i].set == 0) {
			break;
		}
	}
	if (i >= qede->ucst_total) {
		qede_info(qede, "add macaddr ignored no space");
		return (ENOSPC);
	}
	ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_ADD);
	if (ret == 0) {
		/* Firmware accepted the filter; record it in slot i. */
		bcopy(mac_addr,
		    qede->ucst_mac[i].mac_addr.ether_addr_octet,
		    ETHERADDRL);
		qede->ucst_mac[i].set = 1;
		qede->ucst_avail--;
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, " add macaddr passed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
	} else {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "add macaddr failed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);

	}
	/*
	 * First unicast address just consumed a slot: also install the
	 * broadcast filter in the next free slot.
	 */
	if (qede->ucst_avail == (qede->ucst_total -1)) {
		u8 bcast_addr[] =
		{
			0xff, 0xff, 0xff, 0xff, 0xff,
			0xff
		};
		for (i = 0; i < qede->ucst_total; i++) {
			if (qede->ucst_mac[i].set == 0)
				break;
		}
		ret = qede_set_mac_addr(qede,
		    (uint8_t *)bcast_addr, ECORE_FILTER_ADD);
		if (ret == 0) {
			bcopy(bcast_addr,
			    qede->ucst_mac[i].mac_addr.ether_addr_octet,
			    ETHERADDRL);
			qede->ucst_mac[i].set = 1;
			qede->ucst_avail--;
		} else {

			/* LINTED E_ARGUMENT_MISMATCH */
			qede_info(qede, "add macaddr failed for addr "
			    "%02x:%02x:%02x:%02x:%02x:%02x",
			    mac_addr[0], mac_addr[1],
			    mac_addr[2], mac_addr[3], mac_addr[4],
			    mac_addr[5]);
		}

	}

	return (ret);

}
146
147 #ifndef ILLUMOS
148 static int
qede_add_mac_addr(void * arg,const uint8_t * mac_addr,const uint64_t flags)149 qede_add_mac_addr(void *arg, const uint8_t *mac_addr, const uint64_t flags)
150 #else
151 static int
152 qede_add_mac_addr(void *arg, const uint8_t *mac_addr)
153 #endif
154 {
155 qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
156 qede_t *qede = rx_group->qede;
157 int ret = DDI_SUCCESS;
158
159 /* LINTED E_ARGUMENT_MISMATCH */
160 qede_info(qede, " mac addr :" MAC_STRING, MACTOSTR(mac_addr));
161
162 mutex_enter(&qede->gld_lock);
163 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
164 mutex_exit(&qede->gld_lock);
165 return (ECANCELED);
166 }
167 ret = qede_add_macaddr(qede, (uint8_t *)mac_addr);
168
169 mutex_exit(&qede->gld_lock);
170
171
172 return (ret);
173 }
174
175 static int
qede_rem_macaddr(qede_t * qede,uint8_t * mac_addr)176 qede_rem_macaddr(qede_t *qede, uint8_t *mac_addr)
177 {
178 int ret = 0;
179 int i;
180
181 i = qede_ucst_find(qede, mac_addr);
182 if (i == -1) {
183 /* LINTED E_ARGUMENT_MISMATCH */
184 qede_info(qede,
185 "mac addr not there to remove",
186 MAC_STRING, MACTOSTR(mac_addr));
187 return (0);
188 }
189 if (qede->ucst_mac[i].set == 0) {
190 return (EINVAL);
191 }
192 ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_REMOVE);
193 if (ret == 0) {
194 bzero(qede->ucst_mac[i].mac_addr.ether_addr_octet,ETHERADDRL);
195 qede->ucst_mac[i].set = 0;
196 qede->ucst_avail++;
197 } else {
198 /* LINTED E_ARGUMENT_MISMATCH */
199 qede_info(qede, "mac addr remove failed",
200 MAC_STRING, MACTOSTR(mac_addr));
201 }
202 return (ret);
203
204 }
205
206
207 static int
qede_rem_mac_addr(void * arg,const uint8_t * mac_addr)208 qede_rem_mac_addr(void *arg, const uint8_t *mac_addr)
209 {
210 qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
211 qede_t *qede = rx_group->qede;
212 int ret = DDI_SUCCESS;
213
214 /* LINTED E_ARGUMENT_MISMATCH */
215 qede_info(qede, "mac addr remove:" MAC_STRING, MACTOSTR(mac_addr));
216 mutex_enter(&qede->gld_lock);
217 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
218 mutex_exit(&qede->gld_lock);
219 return (ECANCELED);
220 }
221 ret = qede_rem_macaddr(qede, (uint8_t *)mac_addr);
222 mutex_exit(&qede->gld_lock);
223 return (ret);
224 }
225
226
227 static int
qede_tx_ring_stat(mac_ring_driver_t rh,uint_t stat,uint64_t * val)228 qede_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
229 {
230 int ret = 0;
231
232 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
233 qede_tx_ring_t *tx_ring = fp->tx_ring[0];
234 qede_t *qede = fp->qede;
235
236
237 if (qede->qede_state == QEDE_STATE_SUSPENDED)
238 return (ECANCELED);
239
240 switch (stat) {
241 case MAC_STAT_OBYTES:
242 *val = tx_ring->tx_byte_count;
243 break;
244
245 case MAC_STAT_OPACKETS:
246 *val = tx_ring->tx_pkt_count;
247 break;
248
249 default:
250 *val = 0;
251 ret = ENOTSUP;
252 }
253
254 return (ret);
255 }
256
257 #ifndef ILLUMOS
258 static mblk_t *
qede_rx_ring_poll(void * arg,int poll_bytes,int poll_pkts)259 qede_rx_ring_poll(void *arg, int poll_bytes, int poll_pkts)
260 {
261 #else
262 static mblk_t *
263 qede_rx_ring_poll(void *arg, int poll_bytes)
264 {
265 /* XXX pick a value at the moment */
266 int poll_pkts = 100;
267 #endif
268 qede_fastpath_t *fp = (qede_fastpath_t *)arg;
269 mblk_t *mp = NULL;
270 int work_done = 0;
271 qede_t *qede = fp->qede;
272
273 if (poll_bytes == 0) {
274 return (NULL);
275 }
276
277 mutex_enter(&fp->fp_lock);
278 qede->intrSbPollCnt[fp->vect_info->vect_index]++;
279
280 mp = qede_process_fastpath(fp, poll_bytes, poll_pkts, &work_done);
281 if (mp != NULL) {
282 fp->rx_ring->rx_poll_cnt++;
283 } else if ((mp == NULL) && (work_done == 0)) {
284 qede->intrSbPollNoChangeCnt[fp->vect_info->vect_index]++;
285 }
286
287 mutex_exit(&fp->fp_lock);
288 return (mp);
289 }
290
291 #ifndef ILLUMOS
292 static int
293 qede_rx_ring_intr_enable(mac_ring_driver_t rh)
294 #else
295 static int
296 qede_rx_ring_intr_enable(mac_intr_handle_t rh)
297 #endif
298 {
299 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
300
301 mutex_enter(&fp->qede->drv_lock);
302 if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
303 mutex_exit(&fp->qede->drv_lock);
304 return (DDI_FAILURE);
305 }
306
307 fp->rx_ring->intrEnableCnt++;
308 qede_enable_hw_intr(fp);
309 fp->disabled_by_poll = 0;
310 mutex_exit(&fp->qede->drv_lock);
311
312 return (DDI_SUCCESS);
313 }
314
315 #ifndef ILLUMOS
316 static int
317 qede_rx_ring_intr_disable(mac_ring_driver_t rh)
318 #else
319 static int
320 qede_rx_ring_intr_disable(mac_intr_handle_t rh)
321 #endif
322 {
323 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
324
325 mutex_enter(&fp->qede->drv_lock);
326 if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
327 mutex_exit(&fp->qede->drv_lock);
328 return (DDI_FAILURE);
329 }
330 fp->rx_ring->intrDisableCnt++;
331 qede_disable_hw_intr(fp);
332 fp->disabled_by_poll = 1;
333 mutex_exit(&fp->qede->drv_lock);
334 return (DDI_SUCCESS);
335 }
336
337 static int
338 qede_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
339 {
340
341 int ret = 0;
342
343 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
344 qede_t *qede = fp->qede;
345 qede_rx_ring_t *rx_ring = fp->rx_ring;
346
347 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
348 return (ECANCELED);
349 }
350
351 switch (stat) {
352 case MAC_STAT_RBYTES:
353 *val = rx_ring->rx_byte_cnt;
354 break;
355 case MAC_STAT_IPACKETS:
356 *val = rx_ring->rx_pkt_cnt;
357 break;
358 default:
359 *val = 0;
360 ret = ENOTSUP;
361 break;
362 }
363
364 return (ret);
365 }
366
367 static int
368 qede_get_global_ring_index(qede_t *qede, int gindex, int rindex)
369 {
370 qede_fastpath_t *fp;
371 qede_rx_ring_t *rx_ring;
372 int i = 0;
373
374 for (i = 0; i < qede->num_fp; i++) {
375 fp = &qede->fp_array[i];
376 rx_ring = fp->rx_ring;
377
378 if (rx_ring->group_index == gindex) {
379 rindex--;
380 }
381 if (rindex < 0) {
382 return (i);
383 }
384 }
385
386 return (-1);
387 }
388
389 static void
390 qede_rx_ring_stop(mac_ring_driver_t rh)
391 {
392 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
393 qede_rx_ring_t *rx_ring = fp->rx_ring;
394
395 qede_print("!%s(%d): called", __func__,fp->qede->instance);
396 mutex_enter(&fp->fp_lock);
397 rx_ring->mac_ring_started = B_FALSE;
398 mutex_exit(&fp->fp_lock);
399 }
400
401 static int
402 qede_rx_ring_start(mac_ring_driver_t rh, u64 mr_gen_num)
403 {
404 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
405 qede_rx_ring_t *rx_ring = fp->rx_ring;
406
407 qede_print("!%s(%d): called", __func__,fp->qede->instance);
408 mutex_enter(&fp->fp_lock);
409 rx_ring->mr_gen_num = mr_gen_num;
410 rx_ring->mac_ring_started = B_TRUE;
411 rx_ring->intrDisableCnt = 0;
412 rx_ring->intrEnableCnt = 0;
413 fp->disabled_by_poll = 0;
414
415 mutex_exit(&fp->fp_lock);
416
417 return (DDI_SUCCESS);
418 }
419
420 /* Callback function from mac layer to register rings */
421 void
422 qede_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
423 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
424 {
425 qede_t *qede = (qede_t *)arg;
426 mac_intr_t *mintr = &infop->mri_intr;
427
428 switch (rtype) {
429 case MAC_RING_TYPE_RX: {
430 /*
431 * Index passed as a param is the ring index within the
432 * given group index. If multiple groups are supported
433 * then need to search into all groups to find out the
434 * global ring index for the passed group relative
435 * ring index
436 */
437 int global_ring_index = qede_get_global_ring_index(qede,
438 group_index, ring_index);
439 qede_fastpath_t *fp;
440 qede_rx_ring_t *rx_ring;
441 int i;
442
443 /*
444 * global_ring_index < 0 means group index passed
445 * was registered by our driver
446 */
447 ASSERT(global_ring_index >= 0);
448
449 if (rh == NULL) {
450 cmn_err(CE_WARN, "!rx ring(%d) ring handle NULL",
451 global_ring_index);
452 }
453
454 fp = &qede->fp_array[global_ring_index];
455 rx_ring = fp->rx_ring;
456 fp->qede = qede;
457
458 rx_ring->mac_ring_handle = rh;
459
460 qede_info(qede, "rx_ring %d mac_ring_handle %p",
461 rx_ring->rss_id, rh);
462
463 /* mri_driver passed as arg to mac_ring* callbacks */
464 infop->mri_driver = (mac_ring_driver_t)fp;
465 /*
466 * mri_start callback will supply a mac rings generation
467 * number which is needed while indicating packets
468 * upstream via mac_rx_ring() call
469 */
470 infop->mri_start = qede_rx_ring_start;
471 infop->mri_stop = qede_rx_ring_stop;
472 infop->mri_poll = qede_rx_ring_poll;
473 infop->mri_stat = qede_rx_ring_stat;
474
475 mintr->mi_handle = (mac_intr_handle_t)fp;
476 mintr->mi_enable = qede_rx_ring_intr_enable;
477 mintr->mi_disable = qede_rx_ring_intr_disable;
478 if (qede->intr_ctx.intr_type_in_use &
479 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
480 mintr->mi_ddi_handle =
481 qede->intr_ctx.
482 intr_hdl_array[global_ring_index + qede->num_hwfns];
483 }
484 break;
485 }
486 case MAC_RING_TYPE_TX: {
487 qede_fastpath_t *fp;
488 qede_tx_ring_t *tx_ring;
489 int i, tc;
490
491 ASSERT(ring_index < qede->num_fp);
492
493 fp = &qede->fp_array[ring_index];
494 fp->qede = qede;
495 tx_ring = fp->tx_ring[0];
496 tx_ring->mac_ring_handle = rh;
497 qede_info(qede, "tx_ring %d mac_ring_handle %p",
498 tx_ring->tx_queue_index, rh);
499 infop->mri_driver = (mac_ring_driver_t)fp;
500 infop->mri_start = NULL;
501 infop->mri_stop = NULL;
502 infop->mri_tx = qede_ring_tx;
503 infop->mri_stat = qede_tx_ring_stat;
504 if (qede->intr_ctx.intr_type_in_use &
505 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
506 mintr->mi_ddi_handle =
507 qede->intr_ctx.
508 intr_hdl_array[ring_index + qede->num_hwfns];
509 }
510 break;
511 }
512 default:
513 break;
514 }
515 }
516
517 /*
518 * Callback function from mac layer to register group
519 */
520 void
521 qede_fill_group(void *arg, mac_ring_type_t rtype, const int index,
522 mac_group_info_t *infop, mac_group_handle_t gh)
523 {
524 qede_t *qede = (qede_t *)arg;
525
526 switch (rtype) {
527 case MAC_RING_TYPE_RX: {
528 qede_mac_group_t *rx_group;
529
530 rx_group = &qede->rx_groups[index];
531 rx_group->group_handle = gh;
532 rx_group->group_index = index;
533 rx_group->qede = qede;
534 infop->mgi_driver = (mac_group_driver_t)rx_group;
535 infop->mgi_start = NULL;
536 infop->mgi_stop = NULL;
537 #ifndef ILLUMOS
538 infop->mgi_addvlan = NULL;
539 infop->mgi_remvlan = NULL;
540 infop->mgi_getsriov_info = NULL;
541 infop->mgi_setmtu = NULL;
542 #endif
543 infop->mgi_addmac = qede_add_mac_addr;
544 infop->mgi_remmac = qede_rem_mac_addr;
545 infop->mgi_count = qede->num_fp;
546 #ifndef ILLUMOS
547 if (index == 0) {
548 infop->mgi_flags = MAC_GROUP_DEFAULT;
549 }
550 #endif
551
552 break;
553 }
554 case MAC_RING_TYPE_TX: {
555 qede_mac_group_t *tx_group;
556
557 tx_group = &qede->tx_groups[index];
558 tx_group->group_handle = gh;
559 tx_group->group_index = index;
560 tx_group->qede = qede;
561
562 infop->mgi_driver = (mac_group_driver_t)tx_group;
563 infop->mgi_start = NULL;
564 infop->mgi_stop = NULL;
565 infop->mgi_addmac = NULL;
566 infop->mgi_remmac = NULL;
567 #ifndef ILLUMOS
568 infop->mgi_addvlan = NULL;
569 infop->mgi_remvlan = NULL;
570 infop->mgi_setmtu = NULL;
571 infop->mgi_getsriov_info = NULL;
572 #endif
573
574 infop->mgi_count = qede->num_fp;
575
576 #ifndef ILLUMOS
577 if (index == 0) {
578 infop->mgi_flags = MAC_GROUP_DEFAULT;
579 }
580 #endif
581 break;
582 }
583 default:
584 break;
585 }
586 }
587
588 #ifdef ILLUMOS
589 static int
590 qede_transceiver_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
591 {
592 qede_t *qede = arg;
593 struct ecore_dev *edev = &qede->edev;
594 struct ecore_hwfn *hwfn;
595 struct ecore_ptt *ptt;
596 uint32_t transceiver_state;
597
598 if (id >= edev->num_hwfns || arg == NULL || infop == NULL)
599 return (EINVAL);
600
601 hwfn = &edev->hwfns[id];
602 ptt = ecore_ptt_acquire(hwfn);
603 if (ptt == NULL) {
604 return (EIO);
605 }
606 /*
607 * Use the underlying raw API to get this information. While the
608 * ecore_phy routines have some ways of getting to this information, it
609 * ends up writing the raw data as ASCII characters which doesn't help
610 * us one bit.
611 */
612 transceiver_state = ecore_rd(hwfn, ptt, hwfn->mcp_info->port_addr +
613 offsetof(struct public_port, transceiver_data));
614 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
615 ecore_ptt_release(hwfn, ptt);
616
617 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) != 0) {
618 mac_transceiver_info_set_present(infop, B_TRUE);
619 /*
620 * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
621 * not set, so we cannot rely on it. Instead, we have found that
622 * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
623 * use the transceiver.
624 */
625 if ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) != 0) {
626 mac_transceiver_info_set_usable(infop, B_FALSE);
627 } else {
628 mac_transceiver_info_set_usable(infop, B_TRUE);
629 }
630 } else {
631 mac_transceiver_info_set_present(infop, B_FALSE);
632 mac_transceiver_info_set_usable(infop, B_FALSE);
633 }
634
635 return (0);
636 }
637
638 static int
639 qede_transceiver_read(void *arg, uint_t id, uint_t page, void *buf,
640 size_t nbytes, off_t offset, size_t *nread)
641 {
642 qede_t *qede = arg;
643 struct ecore_dev *edev = &qede->edev;
644 struct ecore_hwfn *hwfn;
645 uint32_t port, lane;
646 struct ecore_ptt *ptt;
647 enum _ecore_status_t ret;
648
649 if (id >= edev->num_hwfns || buf == NULL || nbytes == 0 || nread == NULL ||
650 (page != 0xa0 && page != 0xa2) || offset < 0)
651 return (EINVAL);
652
653 /*
654 * Both supported pages have a length of 256 bytes, ensure nothing asks
655 * us to go beyond that.
656 */
657 if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256)) {
658 return (EINVAL);
659 }
660
661 hwfn = &edev->hwfns[id];
662 ptt = ecore_ptt_acquire(hwfn);
663 if (ptt == NULL) {
664 return (EIO);
665 }
666
667 ret = ecore_mcp_phy_sfp_read(hwfn, ptt, hwfn->port_id, page, offset,
668 nbytes, buf);
669 ecore_ptt_release(hwfn, ptt);
670 if (ret != ECORE_SUCCESS) {
671 return (EIO);
672 }
673 *nread = nbytes;
674 return (0);
675 }
676 #endif /* ILLUMOS */
677
678
679 static int
680 qede_mac_stats(void * arg,
681 uint_t stat,
682 uint64_t * value)
683 {
684 qede_t * qede = (qede_t *)arg;
685 struct ecore_eth_stats vstats;
686 struct ecore_dev *edev = &qede->edev;
687 struct qede_link_cfg lnkcfg;
688 int rc = 0;
689 qede_fastpath_t *fp = &qede->fp_array[0];
690 qede_rx_ring_t *rx_ring;
691 qede_tx_ring_t *tx_ring;
692
693 if ((qede == NULL) || (value == NULL)) {
694 return EINVAL;
695 }
696
697
698 mutex_enter(&qede->gld_lock);
699
700 if(qede->qede_state != QEDE_STATE_STARTED) {
701 mutex_exit(&qede->gld_lock);
702 return EAGAIN;
703 }
704
705 *value = 0;
706
707 memset(&vstats, 0, sizeof(struct ecore_eth_stats));
708 ecore_get_vport_stats(edev, &vstats);
709
710
711 memset(&qede->curcfg, 0, sizeof(struct qede_link_cfg));
712 qede_get_link_info(&edev->hwfns[0], &qede->curcfg);
713
714
715
716 switch (stat)
717 {
718 case MAC_STAT_IFSPEED:
719 *value = (qede->props.link_speed * 1000000ULL);
720 break;
721 case MAC_STAT_MULTIRCV:
722 *value = vstats.common.rx_mcast_pkts;
723 break;
724 case MAC_STAT_BRDCSTRCV:
725 *value = vstats.common.rx_bcast_pkts;
726 break;
727 case MAC_STAT_MULTIXMT:
728 *value = vstats.common.tx_mcast_pkts;
729 break;
730 case MAC_STAT_BRDCSTXMT:
731 *value = vstats.common.tx_bcast_pkts;
732 break;
733 case MAC_STAT_NORCVBUF:
734 *value = vstats.common.no_buff_discards;
735 break;
736 case MAC_STAT_NOXMTBUF:
737 *value = 0;
738 break;
739 case MAC_STAT_IERRORS:
740 case ETHER_STAT_MACRCV_ERRORS:
741 *value = vstats.common.mac_filter_discards +
742 vstats.common.packet_too_big_discard +
743 vstats.common.rx_crc_errors;
744 break;
745
746 case MAC_STAT_OERRORS:
747 break;
748
749 case MAC_STAT_COLLISIONS:
750 *value = vstats.bb.tx_total_collisions;
751 break;
752
753 case MAC_STAT_RBYTES:
754 *value = vstats.common.rx_ucast_bytes +
755 vstats.common.rx_mcast_bytes +
756 vstats.common.rx_bcast_bytes;
757 break;
758
759 case MAC_STAT_IPACKETS:
760 *value = vstats.common.rx_ucast_pkts +
761 vstats.common.rx_mcast_pkts +
762 vstats.common.rx_bcast_pkts;
763 break;
764
765 case MAC_STAT_OBYTES:
766 *value = vstats.common.tx_ucast_bytes +
767 vstats.common.tx_mcast_bytes +
768 vstats.common.tx_bcast_bytes;
769 break;
770
771 case MAC_STAT_OPACKETS:
772 *value = vstats.common.tx_ucast_pkts +
773 vstats.common.tx_mcast_pkts +
774 vstats.common.tx_bcast_pkts;
775 break;
776
777 case ETHER_STAT_ALIGN_ERRORS:
778 *value = vstats.common.rx_align_errors;
779 break;
780
781 case ETHER_STAT_FCS_ERRORS:
782 *value = vstats.common.rx_crc_errors;
783 break;
784
785 case ETHER_STAT_FIRST_COLLISIONS:
786 break;
787
788 case ETHER_STAT_MULTI_COLLISIONS:
789 break;
790
791 case ETHER_STAT_DEFER_XMTS:
792 break;
793
794 case ETHER_STAT_TX_LATE_COLLISIONS:
795 break;
796
797 case ETHER_STAT_EX_COLLISIONS:
798 break;
799
800 case ETHER_STAT_MACXMT_ERRORS:
801 *value = 0;
802 break;
803
804 case ETHER_STAT_CARRIER_ERRORS:
805 break;
806
807 case ETHER_STAT_TOOLONG_ERRORS:
808 *value = vstats.common.rx_oversize_packets;
809 break;
810
811 #if (MAC_VERSION > 1)
812 case ETHER_STAT_TOOSHORT_ERRORS:
813 *value = vstats.common.rx_undersize_packets;
814 break;
815 #endif
816
817 case ETHER_STAT_XCVR_ADDR:
818 *value = 0;
819 break;
820
821 case ETHER_STAT_XCVR_ID:
822 *value = 0;
823 break;
824
825 case ETHER_STAT_XCVR_INUSE:
826 *value = (uint64_t)qede_link_to_media(&qede->curcfg,
827 qede->props.link_speed);
828 break;
829
830 #if (MAC_VERSION > 1)
831 case ETHER_STAT_CAP_10GFDX:
832 *value = 0;
833 break;
834 #endif
835 case ETHER_STAT_CAP_100FDX:
836 *value = 0;
837 break;
838 case ETHER_STAT_CAP_100HDX:
839 *value = 0;
840 break;
841 case ETHER_STAT_CAP_ASMPAUSE:
842 *value = 1;
843 break;
844 case ETHER_STAT_CAP_PAUSE:
845 *value = 1;
846 break;
847 case ETHER_STAT_CAP_AUTONEG:
848 *value = 1;
849 break;
850
851 #if (MAC_VERSION > 1)
852 case ETHER_STAT_CAP_REMFAULT:
853 *value = 0;
854 break;
855 #endif
856
857 #if (MAC_VERSION > 1)
858 case ETHER_STAT_ADV_CAP_10GFDX:
859 *value = 0;
860 break;
861 #endif
862 case ETHER_STAT_ADV_CAP_ASMPAUSE:
863 *value = 1;
864 break;
865
866 case ETHER_STAT_ADV_CAP_PAUSE:
867 *value = 1;
868 break;
869
870 case ETHER_STAT_ADV_CAP_AUTONEG:
871 *value = qede->curcfg.adv_capab.autoneg;
872 break;
873
874 #if (MAC_VERSION > 1)
875 case ETHER_STAT_ADV_REMFAULT:
876 *value = 0;
877 break;
878 #endif
879
880 case ETHER_STAT_LINK_AUTONEG:
881 *value = qede->curcfg.autoneg;
882 break;
883
884 case ETHER_STAT_LINK_DUPLEX:
885 *value = (qede->props.link_duplex == DUPLEX_FULL) ?
886 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
887 break;
888 /*
889 * Supported speeds. These indicate what hardware is capable of.
890 */
891 case ETHER_STAT_CAP_1000HDX:
892 *value = qede->curcfg.supp_capab.param_1000hdx;
893 break;
894
895 case ETHER_STAT_CAP_1000FDX:
896 *value = qede->curcfg.supp_capab.param_1000fdx;
897 break;
898
899 case ETHER_STAT_CAP_10GFDX:
900 *value = qede->curcfg.supp_capab.param_10000fdx;
901 break;
902
903 case ETHER_STAT_CAP_25GFDX:
904 *value = qede->curcfg.supp_capab.param_25000fdx;
905 break;
906
907 case ETHER_STAT_CAP_40GFDX:
908 *value = qede->curcfg.supp_capab.param_40000fdx;
909 break;
910
911 case ETHER_STAT_CAP_50GFDX:
912 *value = qede->curcfg.supp_capab.param_50000fdx;
913 break;
914
915 case ETHER_STAT_CAP_100GFDX:
916 *value = qede->curcfg.supp_capab.param_100000fdx;
917 break;
918
919 /*
920 * Advertised speeds. These indicate what hardware is currently sending.
921 */
922 case ETHER_STAT_ADV_CAP_1000HDX:
923 *value = qede->curcfg.adv_capab.param_1000hdx;
924 break;
925
926 case ETHER_STAT_ADV_CAP_1000FDX:
927 *value = qede->curcfg.adv_capab.param_1000fdx;
928 break;
929
930 case ETHER_STAT_ADV_CAP_10GFDX:
931 *value = qede->curcfg.adv_capab.param_10000fdx;
932 break;
933
934 case ETHER_STAT_ADV_CAP_25GFDX:
935 *value = qede->curcfg.adv_capab.param_25000fdx;
936 break;
937
938 case ETHER_STAT_ADV_CAP_40GFDX:
939 *value = qede->curcfg.adv_capab.param_40000fdx;
940 break;
941
942 case ETHER_STAT_ADV_CAP_50GFDX:
943 *value = qede->curcfg.adv_capab.param_50000fdx;
944 break;
945
946 case ETHER_STAT_ADV_CAP_100GFDX:
947 *value = qede->curcfg.adv_capab.param_100000fdx;
948 break;
949
950 default:
951 rc = ENOTSUP;
952 }
953
954 mutex_exit(&qede->gld_lock);
955 return (rc);
956 }
957
958 /* (flag) TRUE = on, FALSE = off */
959 static int
960 qede_mac_promiscuous(void *arg,
961 boolean_t on)
962 {
963 qede_t *qede = (qede_t *)arg;
964 qede_print("!%s(%d): called", __func__,qede->instance);
965 int ret = DDI_SUCCESS;
966 enum qede_filter_rx_mode_type mode;
967
968 mutex_enter(&qede->drv_lock);
969
970 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
971 ret = ECANCELED;
972 goto exit;
973 }
974
975 if (on) {
976 qede_info(qede, "Entering promiscuous mode");
977 mode = QEDE_FILTER_RX_MODE_PROMISC;
978 qede->params.promisc_fl = B_TRUE;
979 } else {
980 qede_info(qede, "Leaving promiscuous mode");
981 if(qede->params.multi_promisc_fl == B_TRUE) {
982 mode = QEDE_FILTER_RX_MODE_MULTI_PROMISC;
983 } else {
984 mode = QEDE_FILTER_RX_MODE_REGULAR;
985 }
986 qede->params.promisc_fl = B_FALSE;
987 }
988
989 ret = qede_set_filter_rx_mode(qede, mode);
990
991 exit:
992 mutex_exit(&qede->drv_lock);
993 return (ret);
994 }
995
996 int qede_set_rx_mac_mcast(qede_t *qede, enum ecore_filter_opcode opcode,
997 uint8_t *mac, int mc_cnt)
998 {
999 struct ecore_filter_mcast cmd;
1000 int i;
1001 memset(&cmd, 0, sizeof(cmd));
1002 cmd.opcode = opcode;
1003 cmd.num_mc_addrs = mc_cnt;
1004
1005 for (i = 0; i < mc_cnt; i++, mac += ETH_ALLEN) {
1006 COPY_ETH_ADDRESS(mac, cmd.mac[i]);
1007 }
1008
1009
1010 return (ecore_filter_mcast_cmd(&qede->edev, &cmd,
1011 ECORE_SPQ_MODE_CB, NULL));
1012
1013 }
1014
1015 int
1016 qede_set_filter_rx_mode(qede_t * qede, enum qede_filter_rx_mode_type type)
1017 {
1018 struct ecore_filter_accept_flags flg;
1019
1020 memset(&flg, 0, sizeof(flg));
1021
1022 flg.update_rx_mode_config = 1;
1023 flg.update_tx_mode_config = 1;
1024 flg.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
1025 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1026 flg.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
1027 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1028
1029 if (type == QEDE_FILTER_RX_MODE_PROMISC)
1030 flg.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
1031 ECORE_ACCEPT_MCAST_UNMATCHED;
1032 else if (type == QEDE_FILTER_RX_MODE_MULTI_PROMISC)
1033 flg.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
1034 qede_info(qede, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n",
1035 flg.rx_accept_filter, flg.tx_accept_filter, type);
1036 return (ecore_filter_accept_cmd(&qede->edev, 0, flg,
1037 0, /* update_accept_any_vlan */
1038 0, /* accept_any_vlan */
1039 ECORE_SPQ_MODE_CB, NULL));
1040 }
1041
1042 int
1043 qede_multicast(qede_t *qede, boolean_t flag, const uint8_t *ptr_mcaddr)
1044 {
1045 int i, ret = DDI_SUCCESS;
1046 qede_mcast_list_entry_t *ptr_mlist;
1047 qede_mcast_list_entry_t *ptr_entry;
1048 int mc_cnt;
1049 unsigned char *mc_macs, *tmpmc;
1050 size_t size;
1051 boolean_t mcmac_exists = B_FALSE;
1052 enum qede_filter_rx_mode_type mode;
1053
1054 if (!ptr_mcaddr) {
1055 cmn_err(CE_NOTE, "Removing all multicast");
1056 } else {
1057 cmn_err(CE_NOTE,
1058 "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
1059 qede, (flag) ? "Adding" : "Removing", ptr_mcaddr[0],
1060 ptr_mcaddr[1],ptr_mcaddr[2],ptr_mcaddr[3],ptr_mcaddr[4],
1061 ptr_mcaddr[5]);
1062 }
1063
1064
1065 if (flag && (ptr_mcaddr == NULL)) {
1066 cmn_err(CE_WARN, "ERROR: Multicast address not specified");
1067 return EINVAL;
1068 }
1069
1070
1071 /* exceeds addition of mcaddr above limit */
1072 if (flag && (qede->mc_cnt >= MAX_MC_SOFT_LIMIT)) {
1073 qede_info(qede, "Cannot add more than MAX_MC_SOFT_LIMIT");
1074 return ENOENT;
1075 }
1076
1077 size = MAX_MC_SOFT_LIMIT * ETH_ALLEN;
1078
1079 mc_macs = kmem_zalloc(size, KM_NOSLEEP);
1080 if (!mc_macs) {
1081 cmn_err(CE_WARN, "ERROR: Failed to allocate for mc_macs");
1082 return EINVAL;
1083 }
1084
1085 tmpmc = mc_macs;
1086
1087 /* remove all multicast - as flag not set and mcaddr not specified*/
1088 if (!flag && (ptr_mcaddr == NULL)) {
1089 while (!QEDE_LIST_IS_EMPTY(&qede->mclist.head)) {
1090 ptr_entry = QEDE_LIST_FIRST_ENTRY(&qede->mclist.head,
1091 qede_mcast_list_entry_t, mclist_entry);
1092 QEDE_LIST_REMOVE(&ptr_entry->mclist_entry,
1093 &qede->mclist.head);
1094 kmem_free(ptr_entry,
1095 sizeof (qede_mcast_list_entry_t) + ETH_ALLEN);
1096 }
1097
1098 ret = qede_set_rx_mac_mcast(qede,
1099 ECORE_FILTER_REMOVE, mc_macs, 1);
1100 qede->mc_cnt = 0;
1101 goto exit;
1102 }
1103
1104 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
1105 &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
1106 {
1107 if ((ptr_entry != NULL) &&
1108 IS_ETH_ADDRESS_EQUAL(ptr_mcaddr, ptr_entry->mac)) {
1109 mcmac_exists = B_TRUE;
1110 break;
1111 }
1112 }
1113 if (flag && mcmac_exists) {
1114 ret = DDI_SUCCESS;
1115 goto exit;
1116 } else if (!flag && !mcmac_exists) {
1117 ret = DDI_SUCCESS;
1118 goto exit;
1119 }
1120
1121 if (flag) {
1122 ptr_entry = kmem_zalloc((sizeof (qede_mcast_list_entry_t) +
1123 ETH_ALLEN), KM_NOSLEEP);
1124 ptr_entry->mac = (uint8_t *)ptr_entry +
1125 sizeof (qede_mcast_list_entry_t);
1126 COPY_ETH_ADDRESS(ptr_mcaddr, ptr_entry->mac);
1127 QEDE_LIST_ADD(&ptr_entry->mclist_entry, &qede->mclist.head);
1128 } else {
1129 QEDE_LIST_REMOVE(&ptr_entry->mclist_entry, &qede->mclist.head);
1130 kmem_free(ptr_entry, sizeof(qede_mcast_list_entry_t) +
1131 ETH_ALLEN);
1132 }
1133
1134 mc_cnt = 0;
1135 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, &qede->mclist.head,
1136 qede_mcast_list_entry_t, mclist_entry) {
1137 COPY_ETH_ADDRESS(ptr_entry->mac, tmpmc);
1138 tmpmc += ETH_ALLEN;
1139 mc_cnt++;
1140 }
1141 qede->mc_cnt = mc_cnt;
1142 if (mc_cnt <=64) {
1143 ret = qede_set_rx_mac_mcast(qede, ECORE_FILTER_ADD,
1144 (unsigned char *)mc_macs, mc_cnt);
1145 if ((qede->params.multi_promisc_fl == B_TRUE) &&
1146 (qede->params.promisc_fl == B_FALSE)) {
1147 mode = QEDE_FILTER_RX_MODE_REGULAR;
1148 ret = qede_set_filter_rx_mode(qede, mode);
1149 }
1150 qede->params.multi_promisc_fl = B_FALSE;
1151 } else {
1152 if ((qede->params.multi_promisc_fl == B_FALSE) &&
1153 (qede->params.promisc_fl == B_FALSE)) {
1154 ret = qede_set_filter_rx_mode(qede,
1155 QEDE_FILTER_RX_MODE_MULTI_PROMISC);
1156 }
1157 qede->params.multi_promisc_fl = B_TRUE;
1158 qede_info(qede, "mode is MULTI_PROMISC");
1159 }
1160 exit:
1161 kmem_free(mc_macs, size);
1162 qede_info(qede, "multicast ret %d mc_cnt %d\n", ret, qede->mc_cnt);
1163 return (ret);
1164 }
1165
1166 /*
1167 * This function is used to enable or disable multicast packet reception for
1168 * particular multicast addresses.
1169 * (flag) TRUE = add, FALSE = remove
1170 */
1171 static int
1172 qede_mac_multicast(void *arg,
1173 boolean_t flag,
1174 const uint8_t * mcast_addr)
1175 {
1176 qede_t *qede = (qede_t *)arg;
1177 int ret = DDI_SUCCESS;
1178
1179
1180 mutex_enter(&qede->gld_lock);
1181 if(qede->qede_state != QEDE_STATE_STARTED) {
1182 mutex_exit(&qede->gld_lock);
1183 return (EAGAIN);
1184 }
1185 ret = qede_multicast(qede, flag, mcast_addr);
1186
1187 mutex_exit(&qede->gld_lock);
1188
1189 return (ret);
1190 }
1191 int
1192 qede_clear_filters(qede_t *qede)
1193 {
1194 int ret = 0;
1195 int i;
1196 if ((qede->params.promisc_fl == B_TRUE) ||
1197 (qede->params.multi_promisc_fl == B_TRUE)) {
1198 ret = qede_set_filter_rx_mode(qede,
1199 QEDE_FILTER_RX_MODE_REGULAR);
1200 if (ret) {
1201 qede_info(qede,
1202 "qede_clear_filters failed to set rx_mode");
1203 }
1204 }
1205 for (i=0; i < qede->ucst_total; i++)
1206 {
1207 if (qede->ucst_mac[i].set) {
1208 qede_rem_macaddr(qede,
1209 qede->ucst_mac[i].mac_addr.ether_addr_octet);
1210 }
1211 }
1212 qede_multicast(qede, B_FALSE, NULL);
1213 return (ret);
1214 }
1215
1216
1217 #ifdef NO_CROSSBOW
1218 static int
1219 qede_mac_unicast(void *arg,
1220 const uint8_t * mac_addr)
1221 {
1222 qede_t *qede = (qede_t *)arg;
1223 return 0;
1224 }
1225
1226
1227 static mblk_t *
1228 qede_mac_tx(void *arg,
1229 mblk_t * mblk)
1230 {
1231 qede_t *qede = (qede_t *)arg;
1232 qede_fastpath_t *fp = &qede->fp_array[0];
1233
1234 mblk = qede_ring_tx((void *)fp, mblk);
1235
1236 return (mblk);
1237 }
1238 #endif /* NO_CROSSBOW */
1239
1240
/* Loopback modes advertised to the LB_GET_INFO ioctl. */
static lb_property_t loopmodes[] = {
	{ normal, "normal", QEDE_LOOP_NONE },
	{ internal, "internal", QEDE_LOOP_INTERNAL },
	{ external, "external", QEDE_LOOP_EXTERNAL },
};
1246
1247 /*
1248 * Set Loopback mode
1249 */
1250
1251 static enum ioc_reply
1252 qede_set_loopback_mode(qede_t *qede, uint32_t mode)
1253 {
1254 int i = 0;
1255 struct ecore_dev *edev = &qede->edev;
1256 struct ecore_hwfn *hwfn;
1257 struct ecore_ptt *ptt = NULL;
1258 struct ecore_mcp_link_params *link_params;
1259
1260 hwfn = &edev->hwfns[0];
1261 link_params = ecore_mcp_get_link_params(hwfn);
1262 ptt = ecore_ptt_acquire(hwfn);
1263
1264 switch(mode) {
1265 default:
1266 qede_info(qede, "unknown loopback mode !!");
1267 ecore_ptt_release(hwfn, ptt);
1268 return IOC_INVAL;
1269
1270 case QEDE_LOOP_NONE:
1271 ecore_mcp_set_link(hwfn, ptt, 0);
1272
1273 while (qede->params.link_state && i < 5000) {
1274 OSAL_MSLEEP(1);
1275 i++;
1276 }
1277 i = 0;
1278
1279 link_params->loopback_mode = ETH_LOOPBACK_NONE;
1280 qede->loop_back_mode = QEDE_LOOP_NONE;
1281 (void) ecore_mcp_set_link(hwfn, ptt, 1);
1282 ecore_ptt_release(hwfn, ptt);
1283
1284 while (!qede->params.link_state && i < 5000) {
1285 OSAL_MSLEEP(1);
1286 i++;
1287 }
1288 return IOC_REPLY;
1289
1290 case QEDE_LOOP_INTERNAL:
1291 qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
1292 __func__, qede->instance);
1293 ecore_mcp_set_link(hwfn, ptt, 0);
1294
1295 while(qede->params.link_state && i < 5000) {
1296 OSAL_MSLEEP(1);
1297 i++;
1298 }
1299 i = 0;
1300 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1301 qede->loop_back_mode = QEDE_LOOP_INTERNAL;
1302 (void) ecore_mcp_set_link(hwfn, ptt, 1);
1303 ecore_ptt_release(hwfn, ptt);
1304
1305 while(!qede->params.link_state && i < 5000) {
1306 OSAL_MSLEEP(1);
1307 i++;
1308 }
1309 return IOC_REPLY;
1310
1311 case QEDE_LOOP_EXTERNAL:
1312 qede_print("!%s(%d) : External loopback mode is not supported",
1313 __func__, qede->instance);
1314 ecore_ptt_release(hwfn, ptt);
1315 return IOC_INVAL;
1316 }
1317 }
1318
1319 static int
1320 qede_ioctl_pcicfg_rd(qede_t *qede, u32 addr, void *data,
1321 int len)
1322 {
1323 u32 crb, actual_crb;
1324 uint32_t ret = 0;
1325 int cap_offset = 0, cap_id = 0, next_cap = 0;
1326 ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
1327 qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1328
1329 cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1330 while (cap_offset != 0) {
1331 /* Check for an invalid PCI read. */
1332 if (cap_offset == PCI_EINVAL8) {
1333 return DDI_FAILURE;
1334 }
1335 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1336 if (cap_id == PCI_CAP_ID_PCI_E) {
1337 /* PCIe expr capab struct found */
1338 break;
1339 } else {
1340 next_cap = pci_config_get8(pci_cfg_handle,
1341 cap_offset + 1);
1342 cap_offset = next_cap;
1343 }
1344 }
1345
1346 switch (len) {
1347 case 1:
1348 ret = pci_config_get8(qede->pci_cfg_handle, addr);
1349 (void) memcpy(data, &ret, sizeof(uint8_t));
1350 break;
1351 case 2:
1352 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1353 (void) memcpy(data, &ret, sizeof(uint16_t));
1354 break;
1355 case 4:
1356 ret = pci_config_get32(qede->pci_cfg_handle, addr);
1357 (void) memcpy(data, &ret, sizeof(uint32_t));
1358 break;
1359 default:
1360 cmn_err(CE_WARN, "bad length for pci config read\n");
1361 return (1);
1362 }
1363 return (0);
1364 }
1365
1366 static int
1367 qede_ioctl_pcicfg_wr(qede_t *qede, u32 addr, void *data,
1368 int len)
1369 {
1370 uint16_t ret = 0;
1371 int cap_offset = 0, cap_id = 0, next_cap = 0;
1372 qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1373 ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
1374 #if 1
1375 cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1376 while (cap_offset != 0) {
1377 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1378 if (cap_id == PCI_CAP_ID_PCI_E) {
1379 /* PCIe expr capab struct found */
1380 break;
1381 } else {
1382 next_cap = pci_config_get8(pci_cfg_handle,
1383 cap_offset + 1);
1384 cap_offset = next_cap;
1385 }
1386 }
1387 #endif
1388
1389 switch(len) {
1390 case 1:
1391 pci_config_put8(qede->pci_cfg_handle, addr,
1392 *(char *)&(data));
1393 break;
1394 case 2:
1395 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1396 ret = ret | *(uint16_t *)data1->uabc;
1397
1398 pci_config_put16(qede->pci_cfg_handle, addr,
1399 ret);
1400 break;
1401 case 4:
1402 pci_config_put32(qede->pci_cfg_handle, addr, *(uint32_t *)data1->uabc);
1403 break;
1404
1405 default:
1406 return (1);
1407 }
1408 return (0);
1409 }
1410
1411 static int
1412 qede_ioctl_rd_wr_reg(qede_t *qede, void *data)
1413 {
1414 struct ecore_hwfn *p_hwfn;
1415 struct ecore_dev *edev = &qede->edev;
1416 struct ecore_ptt *ptt;
1417 qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1418 uint32_t ret = 0;
1419 uint8_t cmd = (uint8_t) data1->unused1;
1420 uint32_t addr = data1->off;
1421 uint32_t val = *(uint32_t *)&data1->uabc[1];
1422 uint32_t hwfn_index = *(uint32_t *)&data1->uabc[5];
1423 uint32_t *reg_addr;
1424
1425 if (hwfn_index > qede->num_hwfns) {
1426 cmn_err(CE_WARN, "invalid hwfn index from application\n");
1427 return (EINVAL);
1428 }
1429 p_hwfn = &edev->hwfns[hwfn_index];
1430
1431 switch(cmd) {
1432 case QEDE_REG_READ:
1433 ret = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, addr);
1434 (void) memcpy(data1->uabc, &ret, sizeof(uint32_t));
1435 break;
1436
1437 case QEDE_REG_WRITE:
1438 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, addr, val);
1439 break;
1440
1441 default:
1442 cmn_err(CE_WARN,
1443 "wrong command in register read/write from application\n");
1444 break;
1445 }
1446 return (ret);
1447 }
1448
/*
 * Service the QEDE_RW_NVRAM ioctl: read, stage and write adapter NVRAM
 * contents.  The ioctl payload may be chained across several mblks;
 * the first hdr_size (24) bytes of the first mblk are the request
 * header, the rest is data.  Writes are staged in a driver buffer
 * (qede->nvm_buf_start/nvm_buf) across START_NVM_WRITE /
 * ACCUMULATE_NVM_BUF calls and committed by PUT_FILE_DATA.
 *
 * NOTE(review): several of the length-accounting lines below look
 * suspect (see inline notes); verify against the matching userland
 * utility before changing them.
 */
static int
qede_ioctl_rd_wr_nvram(qede_t *qede, mblk_t *mp)
{
	qede_nvram_data_t *data1 = (qede_nvram_data_t *)(mp->b_cont->b_rptr);
	qede_nvram_data_t *data2, *next_data;
	struct ecore_dev *edev = &qede->edev;
	uint32_t hdr_size = 24, bytes_to_copy, copy_len = 0;
	uint32_t copy_len1 = 0;
	uint32_t addr = data1->off;
	uint32_t size = data1->size, i, buf_size;
	uint8_t cmd, cmd2;
	uint8_t *buf, *tmp_buf;
	mblk_t *mp1;

	cmd = (uint8_t)data1->unused1;

	switch(cmd) {
	case QEDE_NVRAM_CMD_READ:
		/*
		 * NOTE(review): GFP_KERNEL is a Linux flag; presumably the
		 * OSAL compat layer maps it to KM_SLEEP, in which case
		 * kmem_zalloc() cannot return NULL -- confirm.
		 */
		buf = kmem_zalloc(size, GFP_KERNEL);
		if(buf == NULL) {
			cmn_err(CE_WARN, "memory allocation failed"
			    " in nvram read ioctl\n");
			return (DDI_FAILURE);
		}
		(void) ecore_mcp_nvm_read(edev, addr, buf, data1->size);

		/* Data area of the first mblk, after the request header. */
		copy_len = (MBLKL(mp->b_cont)) - hdr_size;
		if(copy_len > size) {
			/* Entire result fits into the first mblk. */
			(void) memcpy(data1->uabc, buf, size);
			kmem_free(buf, size);
			//OSAL_FREE(edev, buf);
			break;
		}
		(void) memcpy(data1->uabc, buf, copy_len);
		bytes_to_copy = size - copy_len;
		tmp_buf = ((uint8_t *)buf) + copy_len;
		copy_len1 = copy_len;
		mp1 = mp->b_cont;
		mp1 = mp1->b_cont;

		/* Spill the remainder into the chained mblks. */
		while (mp1) {
			copy_len = MBLKL(mp1);
			if(mp1->b_cont == NULL) {
				/* Last mblk: 4 trailing bytes reserved. */
				copy_len = MBLKL(mp1) - 4;
			}
			data2 = (qede_nvram_data_t *)mp1->b_rptr;
			if (copy_len > bytes_to_copy) {
				(void) memcpy(data2->uabc, tmp_buf,
				    bytes_to_copy);
				break;
			}
			(void) memcpy(data2->uabc, tmp_buf, copy_len);
			tmp_buf = tmp_buf + copy_len;
			/*
			 * NOTE(review): this doubles copy_len before the
			 * subtraction below; "copy_len1 += copy_len" was
			 * probably intended -- confirm.
			 */
			copy_len += copy_len;
			mp1 = mp1->b_cont;
			bytes_to_copy = bytes_to_copy - copy_len;
		}

		kmem_free(buf, size);
		//OSAL_FREE(edev, buf);
		break;

	case QEDE_NVRAM_CMD_WRITE:
		/* cmd2 selects the write sub-command. */
		cmd2 = (uint8_t )data1->cmd2;
		size = data1->size;
		addr = data1->off;
		buf_size = size; //data1->buf_size;
		//buf_size = data1->buf_size;

		switch(cmd2){
		case START_NVM_WRITE:
			/* Allocate the staging buffer for the full image. */
			buf = kmem_zalloc(size, GFP_KERNEL);
			//buf = qede->reserved_buf;
			qede->nvm_buf_size = data1->size;
			if(buf == NULL) {
				cmn_err(CE_WARN,
				    "memory allocation failed in START_NVM_WRITE\n");
				return DDI_FAILURE;
			}
			qede->nvm_buf_start = buf;
			cmn_err(CE_NOTE,
			    "buf = %p, size = %x\n", qede->nvm_buf_start, size);
			/* nvm_buf is the current fill pointer. */
			qede->nvm_buf = buf;
			qede->copy_len = 0;
			//tmp_buf = buf + addr;
			break;

		case ACCUMULATE_NVM_BUF:
			/* Append this ioctl's payload to the staging buffer. */
			tmp_buf = qede->nvm_buf;
			copy_len = MBLKL(mp->b_cont) - hdr_size;
			if(copy_len > buf_size) {
				if (buf_size < qede->nvm_buf_size) {
					(void) memcpy(tmp_buf, data1->uabc, buf_size);
					qede->copy_len = qede->copy_len +
					    buf_size;
				} else {
					(void) memcpy(tmp_buf,
					    data1->uabc, qede->nvm_buf_size);
					qede->copy_len =
					    qede->copy_len + qede->nvm_buf_size;
				}
				tmp_buf = tmp_buf + buf_size;
				qede->nvm_buf = tmp_buf;
				//qede->copy_len = qede->copy_len + buf_size;
				cmn_err(CE_NOTE,
				    "buf_size from app = %x\n", copy_len);
				break;
			}
			(void) memcpy(tmp_buf, data1->uabc, copy_len);
			tmp_buf = tmp_buf + copy_len;
			bytes_to_copy = buf_size - copy_len;
			mp1 = mp->b_cont;
			mp1 = mp1->b_cont;
			copy_len1 = copy_len;

			/* Pull the rest of the payload from chained mblks. */
			while (mp1) {
				copy_len = MBLKL(mp1);
				if (mp1->b_cont == NULL) {
					/* Last mblk: 4 trailing bytes reserved. */
					copy_len = MBLKL(mp1) - 4;
				}
				next_data = (qede_nvram_data_t *) mp1->b_rptr;
				if (copy_len > bytes_to_copy){
					(void) memcpy(tmp_buf, next_data->uabc,
					    bytes_to_copy);
					qede->copy_len = qede->copy_len +
					    bytes_to_copy;
					break;
				}
				(void) memcpy(tmp_buf, next_data->uabc,
				    copy_len);
				qede->copy_len = qede->copy_len + copy_len;
				tmp_buf = tmp_buf + copy_len;
				/*
				 * NOTE(review): copy_len is inflated by
				 * copy_len1 here before being subtracted
				 * from bytes_to_copy -- confirm intent.
				 */
				copy_len = copy_len1 + copy_len;
				bytes_to_copy = bytes_to_copy - copy_len;
				mp1 = mp1->b_cont;
			}
			qede->nvm_buf = tmp_buf;
			break;

		case STOP_NVM_WRITE:
			//qede->nvm_buf = tmp_buf;
			break;
		case READ_BUF:
			/* Debug aid: dump the staged buffer to the console. */
			tmp_buf = (uint8_t *)qede->nvm_buf_start;
			for(i = 0; i < size ; i++){
				cmn_err(CE_NOTE,
				    "buff (%d) : %d\n", i, *tmp_buf);
				tmp_buf ++;
			}
			break;
		}
		break;
	case QEDE_NVRAM_CMD_PUT_FILE_DATA:
		/*
		 * Commit the staged buffer to NVRAM and release it.
		 * NOTE(review): frees with `size` from this request, which
		 * may differ from the size used at START_NVM_WRITE.
		 */
		tmp_buf = qede->nvm_buf_start;
		(void) ecore_mcp_nvm_write(edev, ECORE_PUT_FILE_DATA,
		    addr, tmp_buf, size);
		kmem_free(qede->nvm_buf_start, size);
		//OSAL_FREE(edev, tmp_buf);
		cmn_err(CE_NOTE, "total size = %x, copied size = %x\n",
		    qede->nvm_buf_size, qede->copy_len);
		tmp_buf = NULL;
		qede->nvm_buf = NULL;
		qede->nvm_buf_start = NULL;
		break;

	case QEDE_NVRAM_CMD_SET_SECURE_MODE:
		(void) ecore_mcp_nvm_set_secure_mode(edev, addr);
		break;

	case QEDE_NVRAM_CMD_DEL_FILE:
		(void) ecore_mcp_nvm_del_file(edev, addr);
		break;

	case QEDE_NVRAM_CMD_PUT_FILE_BEGIN:
		(void) ecore_mcp_nvm_put_file_begin(edev, addr);
		break;

	case QEDE_NVRAM_CMD_GET_NVRAM_RESP:
		/* Fetch the last NVRAM command response for the caller. */
		buf = kmem_zalloc(size, KM_SLEEP);
		(void) ecore_mcp_nvm_resp(edev, buf);
		(void)memcpy(data1->uabc, buf, size);
		kmem_free(buf, size);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in NVRAM read/write from application\n");
		break;
	}
	return (DDI_SUCCESS);
}
1640
1641 static int
1642 qede_get_func_info(qede_t *qede, void *data)
1643 {
1644 qede_link_output_t link_op;
1645 qede_func_info_t func_info;
1646 qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1647 struct ecore_dev *edev = &qede->edev;
1648 struct ecore_hwfn *hwfn;
1649 struct ecore_mcp_link_params params;
1650 struct ecore_mcp_link_state link;
1651
1652 hwfn = &edev->hwfns[0];
1653
1654 if(hwfn == NULL){
1655 cmn_err(CE_WARN, "(%s) : cannot acquire hwfn\n",
1656 __func__);
1657 return (DDI_FAILURE);
1658 }
1659 memcpy(¶ms, &hwfn->mcp_info->link_input, sizeof(params));
1660 memcpy(&link, &hwfn->mcp_info->link_output, sizeof(link));
1661
1662 if(link.link_up) {
1663 link_op.link_up = true;
1664 }
1665
1666 link_op.supported_caps = SUPPORTED_FIBRE;
1667 if(params.speed.autoneg) {
1668 link_op.supported_caps |= SUPPORTED_Autoneg;
1669 }
1670
1671 if(params.pause.autoneg ||
1672 (params.pause.forced_rx && params.pause.forced_tx)) {
1673 link_op.supported_caps |= SUPPORTED_Asym_Pause;
1674 }
1675
1676 if (params.pause.autoneg || params.pause.forced_rx ||
1677 params.pause.forced_tx) {
1678 link_op.supported_caps |= SUPPORTED_Pause;
1679 }
1680
1681 if (params.speed.advertised_speeds &
1682 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1683 link_op.supported_caps |= SUPPORTED_1000baseT_Half |
1684 SUPPORTED_1000baseT_Full;
1685 }
1686
1687 if (params.speed.advertised_speeds &
1688 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1689 link_op.supported_caps |= SUPPORTED_10000baseKR_Full;
1690 }
1691
1692 if (params.speed.advertised_speeds &
1693 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) {
1694 link_op.supported_caps |= SUPPORTED_40000baseLR4_Full;
1695 }
1696
1697 link_op.advertised_caps = link_op.supported_caps;
1698
1699 if(link.link_up) {
1700 link_op.speed = link.speed;
1701 } else {
1702 link_op.speed = 0;
1703 }
1704
1705 link_op.duplex = DUPLEX_FULL;
1706 link_op.port = PORT_FIBRE;
1707
1708 link_op.autoneg = params.speed.autoneg;
1709
1710 /* Link partner capabilities */
1711 if (link.partner_adv_speed &
1712 ECORE_LINK_PARTNER_SPEED_1G_HD) {
1713 link_op.lp_caps |= SUPPORTED_1000baseT_Half;
1714 }
1715
1716 if (link.partner_adv_speed &
1717 ECORE_LINK_PARTNER_SPEED_1G_FD) {
1718 link_op.lp_caps |= SUPPORTED_1000baseT_Full;
1719 }
1720
1721 if (link.partner_adv_speed &
1722 ECORE_LINK_PARTNER_SPEED_10G) {
1723 link_op.lp_caps |= SUPPORTED_10000baseKR_Full;
1724 }
1725
1726 if (link.partner_adv_speed &
1727 ECORE_LINK_PARTNER_SPEED_20G) {
1728 link_op.lp_caps |= SUPPORTED_20000baseKR2_Full;
1729 }
1730
1731 if (link.partner_adv_speed &
1732 ECORE_LINK_PARTNER_SPEED_40G) {
1733 link_op.lp_caps |= SUPPORTED_40000baseLR4_Full;
1734 }
1735
1736 if (link.an_complete) {
1737 link_op.lp_caps |= SUPPORTED_Autoneg;
1738 }
1739
1740 if (link.partner_adv_pause) {
1741 link_op.lp_caps |= SUPPORTED_Pause;
1742 }
1743
1744 if (link.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1745 link.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
1746 link_op.lp_caps |= SUPPORTED_Asym_Pause;
1747 }
1748
1749 func_info.supported = link_op.supported_caps;
1750 func_info.advertising = link_op.advertised_caps;
1751 func_info.speed = link_op.speed;
1752 func_info.duplex = link_op.duplex;
1753 func_info.port = qede->pci_func & 0x1;
1754 func_info.autoneg = link_op.autoneg;
1755
1756 (void) memcpy(data1->uabc, &func_info, sizeof(qede_func_info_t));
1757
1758 return (0);
1759 }
1760
1761 static int
1762 qede_do_ioctl(qede_t *qede, queue_t *q, mblk_t *mp)
1763 {
1764 qede_ioctl_data_t *up_data;
1765 qede_driver_info_t driver_info;
1766 struct ecore_dev *edev = &qede->edev;
1767 struct ecore_hwfn *hwfn;
1768 struct ecore_ptt *ptt = NULL;
1769 struct mcp_file_att attrib;
1770 uint32_t flash_size;
1771 uint32_t mcp_resp, mcp_param, txn_size;
1772 uint32_t cmd, size, ret = 0;
1773 uint64_t off;
1774 int * up_data1;
1775 void * ptr;
1776 mblk_t *mp1 = mp;
1777 char mac_addr[32];
1778
1779 up_data = (qede_ioctl_data_t *)(mp->b_cont->b_rptr);
1780
1781 cmd = up_data->cmd;
1782 off = up_data->off;
1783 size = up_data->size;
1784
1785 switch (cmd) {
1786 case QEDE_DRV_INFO:
1787 hwfn = &edev->hwfns[0];
1788 ptt = ecore_ptt_acquire(hwfn);
1789
1790 snprintf(driver_info.drv_name, MAX_QEDE_NAME_LEN, "%s", "qede");
1791 snprintf(driver_info.drv_version, QEDE_STR_SIZE,
1792 "v:%s", qede->version);
1793 snprintf(driver_info.mfw_version, QEDE_STR_SIZE,
1794 "%s", qede->versionMFW);
1795 snprintf(driver_info.stormfw_version, QEDE_STR_SIZE,
1796 "%s", qede->versionFW);
1797 snprintf(driver_info.bus_info, QEDE_STR_SIZE,
1798 "%s", qede->bus_dev_func);
1799
1800
1801 /*
1802 * calling ecore_mcp_nvm_rd_cmd to find the flash length, i
1803 * 0x08 is equivalent of NVM_TYPE_MFW_TRACE1
1804 */
1805 ecore_mcp_get_flash_size(hwfn, ptt, &flash_size);
1806 driver_info.eeprom_dump_len = flash_size;
1807 (void) memcpy(up_data->uabc, &driver_info,
1808 sizeof (qede_driver_info_t));
1809 up_data->size = sizeof (qede_driver_info_t);
1810
1811 ecore_ptt_release(hwfn, ptt);
1812 break;
1813
1814 case QEDE_RD_PCICFG:
1815 ret = qede_ioctl_pcicfg_rd(qede, off, up_data->uabc, size);
1816 break;
1817
1818 case QEDE_WR_PCICFG:
1819 ret = qede_ioctl_pcicfg_wr(qede, off, up_data, size);
1820 break;
1821
1822 case QEDE_RW_REG:
1823 ret = qede_ioctl_rd_wr_reg(qede, (void *)up_data);
1824 break;
1825
1826 case QEDE_RW_NVRAM:
1827 ret = qede_ioctl_rd_wr_nvram(qede, mp1);
1828 break;
1829
1830 case QEDE_FUNC_INFO:
1831 ret = qede_get_func_info(qede, (void *)up_data);
1832 break;
1833
1834 case QEDE_MAC_ADDR:
1835 snprintf(mac_addr, sizeof(mac_addr),
1836 "%02x:%02x:%02x:%02x:%02x:%02x",
1837 qede->ether_addr[0], qede->ether_addr[1],
1838 qede->ether_addr[2], qede->ether_addr[3],
1839 qede->ether_addr[4], qede->ether_addr[5]);
1840 (void) memcpy(up_data->uabc, &mac_addr, sizeof(mac_addr));
1841 break;
1842
1843 }
1844 //if (cmd == QEDE_RW_NVRAM) {
1845 // miocack (q, mp, (sizeof(qede_ioctl_data_t)), 0);
1846 // return IOC_REPLY;
1847 //}
1848 miocack (q, mp, (sizeof(qede_ioctl_data_t)), ret);
1849 //miocack (q, mp, 0, ret);
1850 return (IOC_REPLY);
1851 }
1852
1853 static void
1854 qede_ioctl(qede_t *qede, int cmd, queue_t *q, mblk_t *mp)
1855 {
1856 void *ptr;
1857
1858 switch(cmd) {
1859 case QEDE_CMD:
1860 (void) qede_do_ioctl(qede, q, mp);
1861 break;
1862 default :
1863 cmn_err(CE_WARN, "qede ioctl command %x not supported\n", cmd);
1864 break;
1865 }
1866 return;
1867 }
1868 enum ioc_reply
1869 qede_loopback_ioctl(qede_t *qede, queue_t *wq, mblk_t *mp,
1870 struct iocblk *iocp)
1871 {
1872 lb_info_sz_t *lb_info_size;
1873 lb_property_t *lb_prop;
1874 uint32_t *lb_mode;
1875 int cmd;
1876
1877 /*
1878 * Validate format of ioctl
1879 */
1880 if(mp->b_cont == NULL) {
1881 return IOC_INVAL;
1882 }
1883
1884 cmd = iocp->ioc_cmd;
1885
1886 switch(cmd) {
1887 default:
1888 qede_print("!%s(%d): unknown ioctl command %x\n",
1889 __func__, qede->instance, cmd);
1890 return IOC_INVAL;
1891 case LB_GET_INFO_SIZE:
1892 if (iocp->ioc_count != sizeof(lb_info_sz_t)) {
1893 qede_info(qede, "error: ioc_count %d, sizeof %d",
1894 iocp->ioc_count, sizeof(lb_info_sz_t));
1895 return IOC_INVAL;
1896 }
1897 lb_info_size = (void *)mp->b_cont->b_rptr;
1898 *lb_info_size = sizeof(loopmodes);
1899 return IOC_REPLY;
1900 case LB_GET_INFO:
1901 if (iocp->ioc_count != sizeof (loopmodes)) {
1902 qede_info(qede, "error: iocp->ioc_count %d, sizepof %d",
1903 iocp->ioc_count, sizeof (loopmodes));
1904 return (IOC_INVAL);
1905 }
1906 lb_prop = (void *)mp->b_cont->b_rptr;
1907 bcopy(loopmodes, lb_prop, sizeof (loopmodes));
1908 return IOC_REPLY;
1909 case LB_GET_MODE:
1910 if (iocp->ioc_count != sizeof (uint32_t)) {
1911 qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
1912 iocp->ioc_count, sizeof (uint32_t));
1913 return (IOC_INVAL);
1914 }
1915 lb_mode = (void *)mp->b_cont->b_rptr;
1916 *lb_mode = qede->loop_back_mode;
1917 return IOC_REPLY;
1918 case LB_SET_MODE:
1919 if (iocp->ioc_count != sizeof (uint32_t)) {
1920 qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
1921 iocp->ioc_count, sizeof (uint32_t));
1922 return (IOC_INVAL);
1923 }
1924 lb_mode = (void *)mp->b_cont->b_rptr;
1925 return (qede_set_loopback_mode(qede,*lb_mode));
1926 }
1927 }
1928
/*
 * GLDv3 mc_ioctl entry point.  Rejects ioctls while suspending or
 * suspended, enforces network-configuration privilege for the
 * commands that change state (LB_SET_MODE, QEDE_CMD), then dispatches
 * to the loopback or driver-private handler.  The handler's
 * ioc_reply status decides whether the reply has already been sent
 * (IOC_DONE), must be sent here (IOC_REPLY), or is a nak (IOC_INVAL).
 * qede->drv_lock is held across the dispatch.
 */
static void
qede_mac_ioctl(void * arg,
    queue_t * wq,
    mblk_t * mp)
{
	int err, cmd;
	qede_t * qede = (qede_t *)arg;
	struct iocblk *iocp = (struct iocblk *) (uintptr_t)mp->b_rptr;
	enum ioc_reply status = IOC_DONE;
	boolean_t need_privilege = B_TRUE;

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	/* No ioctls while the device is suspending or suspended. */
	mutex_enter(&qede->drv_lock);
	if ((qede->qede_state == QEDE_STATE_SUSPENDING) ||
	    (qede->qede_state == QEDE_STATE_SUSPENDED)) {
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/*
	 * First pass: decide whether the command needs privilege.  The
	 * read-only LB_GET_* commands do not; the fallthrough into
	 * LB_SET_MODE is intentional (it only shares the break).
	 */
	switch(cmd) {
	case QEDE_CMD:
		break;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHROUGH */
	case LB_SET_MODE:
		break;
	default:
		qede_print("!%s(%d) unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(&qede->drv_lock);
		return;
	}

	if(need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if(err){
			qede_info(qede, "secpolicy() failed");
			miocnak(wq, mp, 0, err);
			mutex_exit(&qede->drv_lock);
			return;
		}
	}

	/* Second pass: dispatch to the actual handler. */
	switch (cmd) {
	default:
		qede_print("!%s(%d) : unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		status = IOC_INVAL;
		mutex_exit(&qede->drv_lock);
		return;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = qede_loopback_ioctl(qede, wq, mp, iocp);
		break;
	case QEDE_CMD:
		qede_ioctl(qede, cmd, wq, mp);
		status = IOC_DONE;
		break;
	}

	/* Send (or decline) the reply according to the handler's verdict. */
	switch(status){
	default:
		qede_print("!%s(%d) : invalid status from ioctl",
		    __func__,qede->instance);
		break;
	case IOC_DONE:
		/*
		 * OK, Reply already sent
		 */

		break;
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	case IOC_INVAL:
		mutex_exit(&qede->drv_lock);
		//miocack(wq, mp, 0, 0);
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		return;
	}
	mutex_exit(&qede->drv_lock);
}
2022
2023 extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf;
2024 extern ddi_dma_attr_t qede_dma_attr_rxbuf;
2025 extern ddi_dma_attr_t qede_dma_attr_desc;
2026
2027 static boolean_t
2028 qede_mac_get_capability(void *arg,
2029 mac_capab_t capability,
2030 void * cap_data)
2031 {
2032 qede_t * qede = (qede_t *)arg;
2033 uint32_t *txflags = cap_data;
2034 boolean_t ret = B_FALSE;
2035
2036 switch (capability) {
2037 case MAC_CAPAB_HCKSUM: {
2038 u32 *tx_flags = cap_data;
2039 /*
2040 * Check if checksum is enabled on
2041 * tx and advertise the cksum capab
2042 * to mac layer accordingly. On Rx
2043 * side checksummed packets are
2044 * reveiced anyway
2045 */
2046 qede_info(qede, "%s tx checksum offload",
2047 (qede->checksum == DEFAULT_CKSUM_OFFLOAD) ?
2048 "Enabling":
2049 "Disabling");
2050
2051 if (qede->checksum != DEFAULT_CKSUM_OFFLOAD) {
2052 ret = B_FALSE;
2053 break;
2054 }
2055 /*
2056 * Hardware does not support ICMPv6 checksumming. Right now the
2057 * GLDv3 doesn't provide us a way to specify that we don't
2058 * support that. As such, we cannot indicate
2059 * HCKSUM_INET_FULL_V6.
2060 */
2061
2062 *tx_flags = HCKSUM_INET_FULL_V4 |
2063 HCKSUM_IPHDRCKSUM;
2064 ret = B_TRUE;
2065 break;
2066 }
2067 case MAC_CAPAB_LSO: {
2068 mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;
2069
2070 qede_info(qede, "%s large segmentation offload",
2071 qede->lso_enable ? "Enabling": "Disabling");
2072 if (qede->lso_enable) {
2073 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2074 cap_lso->lso_basic_tcp_ipv4.lso_max = QEDE_LSO_MAXLEN;
2075 ret = B_TRUE;
2076 }
2077 break;
2078 }
2079 case MAC_CAPAB_RINGS: {
2080 #ifndef NO_CROSSBOW
2081 mac_capab_rings_t *cap_rings = cap_data;
2082 #ifndef ILLUMOS
2083 cap_rings->mr_version = MAC_RINGS_VERSION_1;
2084 #endif
2085
2086 switch (cap_rings->mr_type) {
2087 case MAC_RING_TYPE_RX:
2088 #ifndef ILLUMOS
2089 cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2090 #endif
2091 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2092 //cap_rings->mr_rnum = 1; /* qede variable */
2093 cap_rings->mr_rnum = qede->num_fp; /* qede variable */
2094 cap_rings->mr_gnum = 1;
2095 cap_rings->mr_rget = qede_fill_ring;
2096 cap_rings->mr_gget = qede_fill_group;
2097 cap_rings->mr_gaddring = NULL;
2098 cap_rings->mr_gremring = NULL;
2099 #ifndef ILLUMOS
2100 cap_rings->mr_ggetringtc = NULL;
2101 #endif
2102 ret = B_TRUE;
2103 break;
2104 case MAC_RING_TYPE_TX:
2105 #ifndef ILLUMOS
2106 cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2107 #endif
2108 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2109 //cap_rings->mr_rnum = 1;
2110 cap_rings->mr_rnum = qede->num_fp;
2111 cap_rings->mr_gnum = 0;
2112 cap_rings->mr_rget = qede_fill_ring;
2113 cap_rings->mr_gget = qede_fill_group;
2114 cap_rings->mr_gaddring = NULL;
2115 cap_rings->mr_gremring = NULL;
2116 #ifndef ILLUMOS
2117 cap_rings->mr_ggetringtc = NULL;
2118 #endif
2119 ret = B_TRUE;
2120 break;
2121 default:
2122 ret = B_FALSE;
2123 break;
2124 }
2125 #endif
2126 break; /* CASE MAC_CAPAB_RINGS */
2127 }
2128 #ifdef ILLUMOS
2129 case MAC_CAPAB_TRANSCEIVER: {
2130 mac_capab_transceiver_t *mct = cap_data;
2131
2132 mct->mct_flags = 0;
2133 mct->mct_ntransceivers = qede->edev.num_hwfns;
2134 mct->mct_info = qede_transceiver_info;
2135 mct->mct_read = qede_transceiver_read;
2136
2137 ret = B_TRUE;
2138 break;
2139 }
2140 #endif
2141 default:
2142 break;
2143 }
2144
2145 return (ret);
2146 }
2147
2148 int
2149 qede_configure_link(qede_t *qede, bool op);
2150
2151 static int
2152 qede_mac_set_property(void * arg,
2153 const char * pr_name,
2154 mac_prop_id_t pr_num,
2155 uint_t pr_valsize,
2156 const void * pr_val)
2157 {
2158 qede_t * qede = (qede_t *)arg;
2159 struct ecore_mcp_link_params *link_params;
2160 struct ecore_dev *edev = &qede->edev;
2161 struct ecore_hwfn *hwfn;
2162 int ret_val = 0, i;
2163 uint32_t option;
2164
2165 mutex_enter(&qede->gld_lock);
2166 switch (pr_num)
2167 {
2168 case MAC_PROP_MTU:
2169 bcopy(pr_val, &option, sizeof (option));
2170
2171 if(option == qede->mtu) {
2172 ret_val = 0;
2173 break;
2174 }
2175 if ((option != DEFAULT_JUMBO_MTU) &&
2176 (option != DEFAULT_MTU)) {
2177 ret_val = EINVAL;
2178 break;
2179 }
2180 if(qede->qede_state == QEDE_STATE_STARTED) {
2181 ret_val = EBUSY;
2182 break;
2183 }
2184
2185 ret_val = mac_maxsdu_update(qede->mac_handle, qede->mtu);
2186 if (ret_val == 0) {
2187
2188 qede->mtu = option;
2189 if (option == DEFAULT_JUMBO_MTU) {
2190 qede->jumbo_enable = B_TRUE;
2191 } else {
2192 qede->jumbo_enable = B_FALSE;
2193 }
2194
2195 hwfn = ECORE_LEADING_HWFN(edev);
2196 hwfn->hw_info.mtu = qede->mtu;
2197 ret_val = ecore_mcp_ov_update_mtu(hwfn,
2198 hwfn->p_main_ptt,
2199 hwfn->hw_info.mtu);
2200 if (ret_val != ECORE_SUCCESS) {
2201 qede_print("!%s(%d): MTU change %d option %d"
2202 "FAILED",
2203 __func__,qede->instance, qede->mtu, option);
2204 break;
2205 }
2206 qede_print("!%s(%d): MTU changed %d MTU option"
2207 " %d hwfn %d",
2208 __func__,qede->instance, qede->mtu,
2209 option, hwfn->hw_info.mtu);
2210 }
2211 break;
2212
2213 case MAC_PROP_EN_10GFDX_CAP:
2214 hwfn = &edev->hwfns[0];
2215 link_params = ecore_mcp_get_link_params(hwfn);
2216 if (*(uint8_t *) pr_val) {
2217 link_params->speed.autoneg = 0;
2218 link_params->speed.forced_speed = 10000;
2219 link_params->speed.advertised_speeds =
2220 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2221 qede->forced_speed_10G = *(uint8_t *)pr_val;
2222 }
2223 else {
2224 memcpy(link_params,
2225 &qede->link_input_params.default_link_params,
2226 sizeof (struct ecore_mcp_link_params));
2227 qede->forced_speed_10G = *(uint8_t *)pr_val;
2228 }
2229 if (qede->qede_state == QEDE_STATE_STARTED) {
2230 qede_configure_link(qede, true);
2231 } else {
2232 mutex_exit(&qede->gld_lock);
2233 return (0);
2234 }
2235 break;
2236 default:
2237 ret_val = ENOTSUP;
2238 break;
2239 }
2240 mutex_exit(&qede->gld_lock);
2241 return (ret_val);
2242 }
2243
2244 static void
2245 qede_mac_stop(void *arg)
2246 {
2247 qede_t *qede = (qede_t *)arg;
2248 int status;
2249
2250 qede_print("!%s(%d): called",
2251 __func__,qede->instance);
2252 mutex_enter(&qede->drv_lock);
2253 status = qede_stop(qede);
2254 if (status != DDI_SUCCESS) {
2255 qede_print("!%s(%d): qede_stop "
2256 "FAILED",
2257 __func__,qede->instance);
2258 }
2259
2260 mac_link_update(qede->mac_handle, LINK_STATE_UNKNOWN);
2261 mutex_exit(&qede->drv_lock);
2262 }
2263
2264 static int
2265 qede_mac_start(void *arg)
2266 {
2267 qede_t *qede = (qede_t *)arg;
2268 int status;
2269
2270 qede_print("!%s(%d): called", __func__,qede->instance);
2271 if (!mutex_tryenter(&qede->drv_lock)) {
2272 return (EAGAIN);
2273 }
2274
2275 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
2276 mutex_exit(&qede->drv_lock);
2277 return (ECANCELED);
2278 }
2279
2280 status = qede_start(qede);
2281 if (status != DDI_SUCCESS) {
2282 mutex_exit(&qede->drv_lock);
2283 return (EIO);
2284 }
2285
2286 mutex_exit(&qede->drv_lock);
2287
2288 #ifdef DBLK_DMA_PREMAP
2289 qede->pm_handle = mac_pmh_tx_get(qede->mac_handle);
2290 #endif
2291 return (0);
2292 }
2293
2294 static int
2295 qede_mac_get_property(void *arg,
2296 const char *pr_name,
2297 mac_prop_id_t pr_num,
2298 uint_t pr_valsize,
2299 void *pr_val)
2300 {
2301 qede_t *qede = (qede_t *)arg;
2302 struct ecore_dev *edev = &qede->edev;
2303 link_state_t link_state;
2304 link_duplex_t link_duplex;
2305 uint64_t link_speed;
2306 link_flowctrl_t link_flowctrl;
2307 struct qede_link_cfg link_cfg;
2308 mac_ether_media_t media;
2309 qede_link_cfg_t *hw_cfg = &qede->hwinit;
2310 int ret_val = 0;
2311
2312 memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
2313 qede_get_link_info(&edev->hwfns[0], &link_cfg);
2314
2315
2316
2317 switch (pr_num)
2318 {
2319 case MAC_PROP_MTU:
2320
2321 ASSERT(pr_valsize >= sizeof(uint32_t));
2322 bcopy(&qede->mtu, pr_val, sizeof(uint32_t));
2323 break;
2324
2325 case MAC_PROP_DUPLEX:
2326
2327 ASSERT(pr_valsize >= sizeof(link_duplex_t));
2328 link_duplex = (qede->props.link_duplex) ?
2329 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
2330 bcopy(&link_duplex, pr_val, sizeof(link_duplex_t));
2331 break;
2332
2333 case MAC_PROP_SPEED:
2334
2335 ASSERT(pr_valsize >= sizeof(link_speed));
2336
2337 link_speed = (qede->props.link_speed * 1000000ULL);
2338 bcopy(&link_speed, pr_val, sizeof(link_speed));
2339 break;
2340
2341 case MAC_PROP_STATUS:
2342
2343 ASSERT(pr_valsize >= sizeof(link_state_t));
2344
2345 link_state = (qede->params.link_state) ?
2346 LINK_STATE_UP : LINK_STATE_DOWN;
2347 bcopy(&link_state, pr_val, sizeof(link_state_t));
2348 qede_info(qede, "mac_prop_status %d\n", link_state);
2349 break;
2350
2351 case MAC_PROP_MEDIA:
2352
2353 ASSERT(pr_valsize >= sizeof(mac_ether_media_t));
2354 media = qede_link_to_media(&link_cfg, qede->props.link_speed);
2355 bcopy(&media, pr_val, sizeof(mac_ether_media_t));
2356 break;
2357
2358 case MAC_PROP_AUTONEG:
2359
2360 *(uint8_t *)pr_val = link_cfg.autoneg;
2361 break;
2362
2363 case MAC_PROP_FLOWCTRL:
2364
2365 ASSERT(pr_valsize >= sizeof(link_flowctrl_t));
2366
2367 /*
2368 * illumos does not have the notion of LINK_FLOWCTRL_AUTO at this time.
2369 */
2370 #ifndef ILLUMOS
2371 if (link_cfg.pause_cfg & QEDE_LINK_PAUSE_AUTONEG_ENABLE) {
2372 link_flowctrl = LINK_FLOWCTRL_AUTO;
2373 }
2374 #endif
2375
2376 if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2377 !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2378 link_flowctrl = LINK_FLOWCTRL_NONE;
2379 }
2380 if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2381 !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2382 link_flowctrl = LINK_FLOWCTRL_RX;
2383 }
2384 if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2385 (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2386 link_flowctrl = LINK_FLOWCTRL_TX;
2387 }
2388 if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2389 (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2390 link_flowctrl = LINK_FLOWCTRL_BI;
2391 }
2392
2393 bcopy(&link_flowctrl, pr_val, sizeof (link_flowctrl_t));
2394 break;
2395
2396 case MAC_PROP_ADV_10GFDX_CAP:
2397 *(uint8_t *)pr_val = link_cfg.adv_capab.param_10000fdx;
2398 break;
2399
2400 case MAC_PROP_EN_10GFDX_CAP:
2401 *(uint8_t *)pr_val = qede->forced_speed_10G;
2402 break;
2403
2404 case MAC_PROP_PRIVATE:
2405 default:
2406 return (ENOTSUP);
2407
2408 }
2409
2410 return (0);
2411 }
2412
2413 static void
2414 qede_mac_property_info(void *arg,
2415 const char *pr_name,
2416 mac_prop_id_t pr_num,
2417 mac_prop_info_handle_t prh)
2418 {
2419 qede_t *qede = (qede_t *)arg;
2420 qede_link_props_t *def_cfg = &qede_def_link_props;
2421 link_flowctrl_t link_flowctrl;
2422
2423
2424 switch (pr_num)
2425 {
2426
2427 case MAC_PROP_STATUS:
2428 case MAC_PROP_SPEED:
2429 case MAC_PROP_DUPLEX:
2430 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2431 break;
2432
2433 case MAC_PROP_MTU:
2434
2435 mac_prop_info_set_range_uint32(prh,
2436 MIN_MTU,
2437 MAX_MTU);
2438 break;
2439
2440 case MAC_PROP_AUTONEG:
2441
2442 mac_prop_info_set_default_uint8(prh, def_cfg->autoneg);
2443 break;
2444
2445 case MAC_PROP_FLOWCTRL:
2446
2447 if (!def_cfg->pause) {
2448 link_flowctrl = LINK_FLOWCTRL_NONE;
2449 } else {
2450 link_flowctrl = LINK_FLOWCTRL_BI;
2451 }
2452
2453 mac_prop_info_set_default_link_flowctrl(prh, link_flowctrl);
2454 break;
2455
2456 case MAC_PROP_EN_10GFDX_CAP:
2457 mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
2458 break;
2459
2460 case MAC_PROP_ADV_10GFDX_CAP:
2461 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2462 break;
2463
2464 default:
2465 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2466 break;
2467
2468 }
2469 }
2470
/*
 * GLDv3 callback vector handed to mac_register(9F).  Entries are
 * positional; the trailing comments name the slot each one fills
 * (NOTE(review): slot names taken from the mac_callbacks_t layout in
 * <sys/mac_provider.h> — confirm against the headers this builds with).
 */
static mac_callbacks_t qede_callbacks =
{
	(
	    MC_IOCTL
	    /* | MC_RESOURCES */
	    | MC_SETPROP
	    | MC_GETPROP
	    | MC_PROPINFO
	    | MC_GETCAPAB
	),				/* mc_callbacks: which optional entries are valid */
	qede_mac_stats,			/* mc_getstat */
	qede_mac_start,			/* mc_start */
	qede_mac_stop,			/* mc_stop */
	qede_mac_promiscuous,		/* mc_setpromisc */
	qede_mac_multicast,		/* mc_multicst */
	NULL,				/* mc_unicst: unused (ring-capable driver) */
#ifndef NO_CROSSBOW
	NULL,				/* mc_tx: unused with crossbow rings */
#else
	qede_mac_tx,
#endif
	NULL, /* qede_mac_resources, */	/* reserved/resources slot */
	qede_mac_ioctl,			/* mc_ioctl */
	qede_mac_get_capability,	/* mc_getcapab */
	NULL,				/* mc_open */
	NULL,				/* mc_close */
	qede_mac_set_property,		/* mc_setprop */
	qede_mac_get_property,		/* mc_getprop */
#ifdef MC_PROPINFO
	qede_mac_property_info		/* mc_propinfo */
#endif
};
2503
2504 boolean_t
2505 qede_gld_init(qede_t *qede)
2506 {
2507 int status, ret;
2508 mac_register_t *macp;
2509
2510 macp = mac_alloc(MAC_VERSION);
2511 if (macp == NULL) {
2512 cmn_err(CE_NOTE, "%s: mac_alloc() failed\n", __func__);
2513 return (B_FALSE);
2514 }
2515
2516 macp->m_driver = qede;
2517 macp->m_dip = qede->dip;
2518 macp->m_instance = qede->instance;
2519 macp->m_priv_props = NULL;
2520 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2521 macp->m_src_addr = qede->ether_addr;
2522 macp->m_callbacks = &qede_callbacks;
2523 macp->m_min_sdu = 0;
2524 macp->m_max_sdu = qede->mtu;
2525 macp->m_margin = VLAN_TAGSZ;
2526 #ifdef ILLUMOS
2527 macp->m_v12n = MAC_VIRT_LEVEL1;
2528 #endif
2529
2530 status = mac_register(macp, &qede->mac_handle);
2531 if (status != 0) {
2532 cmn_err(CE_NOTE, "%s: mac_register() failed\n", __func__);
2533 }
2534
2535 mac_free(macp);
2536 if (status == 0) {
2537 return (B_TRUE);
2538 }
2539 return (B_FALSE);
2540 }
2541
2542 boolean_t qede_gld_fini(qede_t * qede)
2543 {
2544 return (B_TRUE);
2545 }
2546
2547
2548 void qede_link_update(qede_t * qede,
2549 link_state_t state)
2550 {
2551 mac_link_update(qede->mac_handle, state);
2552 }
2553
2554