1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014 QLogic Corporation
24 * The contents of this file are subject to the terms of the
25 * QLogic End User License (the "License").
26 * You may not use this file except in compliance with the License.
27 *
28 * You can obtain a copy of the License at
29 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
30 * QLogic_End_User_Software_License.txt
31 * See the License for the specific language governing permissions
32 * and limitations under the License.
33 */
34
35 /*
36 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
37 */
38
39 #include "bnxe.h"
40
/* Default TX ring sizing, used when no bnxeHwPageConfigs entry matches. */
#define BNXE_DEF_TX_BD_PAGE_CNT 12
#define BNXE_DEF_TX_COAL_BUF_CNT 10

/*
 * Maps a client's RX descriptor count to the TX BD page count and TX
 * coalesce buffer count to configure.  Consumed by BnxeInitBdCnts().
 */
typedef struct
{
    int bufCnt;       /* upper bound on RX descriptors for this entry */
    int txBdPageCnt;  /* TX buffer descriptor pages to allocate */
    int txCoalBufCnt; /* TX coalesce buffers to allocate */
} BnxeHwPageConfig;

static BnxeHwPageConfig bnxeHwPageConfigs[] =
{
    /* Buffers     TX BD Pages   TX Coalesce Bufs */
    {  1000,       4,            10 },
    {  1500,       6,            10 },
    {  3000,       12,           10 },
    {  0,          0,            0  } /* zero bufCnt terminates the table */
};

/* Memory allocation tracing; compiled out by default. */
#if 0
#define MEM_LOG BnxeLogInfo
#else
#define MEM_LOG
#endif
65
/* Access attributes for mapped device BARs: little-endian registers. */
ddi_device_acc_attr_t bnxeAccessAttribBAR =
{
    DDI_DEVICE_ATTR_V0,   /* devacc_attr_version */
    DDI_STRUCTURE_LE_ACC, /* devacc_attr_endian_flags */
    DDI_STRICTORDER_ACC,  /* devacc_attr_dataorder */
    DDI_DEFAULT_ACC       /* devacc_attr_access */
};

/* Access attributes for DMA-able data buffers: no byte swapping. */
ddi_device_acc_attr_t bnxeAccessAttribBUF =
{
    DDI_DEVICE_ATTR_V0,   /* devacc_attr_version */
    DDI_NEVERSWAP_ACC,    /* devacc_attr_endian_flags */
    DDI_STRICTORDER_ACC,  /* devacc_attr_dataorder */
    DDI_DEFAULT_ACC       /* devacc_attr_access */
};

/*
 * Template DMA attributes for single-cookie (sgllen == 1), 64-bit
 * addressable allocations.  dma_attr_align is overridden per call in
 * mm_alloc_phys_mem_align_imp().
 */
ddi_dma_attr_t bnxeDmaPageAttrib =
{
    DMA_ATTR_V0,         /* dma_attr_version */
    0,                   /* dma_attr_addr_lo */
    0xffffffffffffffff,  /* dma_attr_addr_hi */
    0xffffffffffffffff,  /* dma_attr_count_max */
    0,                   /* dma_attr_align */
    0xffffffff,          /* dma_attr_burstsizes */
    1,                   /* dma_attr_minxfer */
    0xffffffffffffffff,  /* dma_attr_maxxfer */
    0xffffffffffffffff,  /* dma_attr_seg */
    1,                   /* dma_attr_sgllen */
    1,                   /* dma_attr_granular */
    0,                   /* dma_attr_flags */
};
97
98
/*
 * LM-layer delay hook: busy-wait for delayUs microseconds.  The device
 * argument is unused on this platform.
 */
void mm_wait(lm_device_t * pDev,
             u32_t delayUs)
{
    (void)pDev;

    drv_usecwait(delayUs);
}
105
106
mm_read_pci(lm_device_t * pDev,u32_t pciReg,u32_t * pRegValue)107 lm_status_t mm_read_pci(lm_device_t * pDev,
108 u32_t pciReg,
109 u32_t * pRegValue)
110 {
111 um_device_t * pUM = (um_device_t *)pDev;
112
113 *pRegValue = pci_config_get32(pUM->pPciCfg, (off_t)pciReg);
114
115 return LM_STATUS_SUCCESS;
116 }
117
118
mm_write_pci(lm_device_t * pDev,u32_t pciReg,u32_t regValue)119 lm_status_t mm_write_pci(lm_device_t * pDev,
120 u32_t pciReg,
121 u32_t regValue)
122 {
123 um_device_t * pUM = (um_device_t *)pDev;
124
125 pci_config_put32(pUM->pPciCfg, (off_t)pciReg, regValue);
126
127 return LM_STATUS_SUCCESS;
128 }
129
130
/*
 * Size the TX BD page count and TX coalesce buffer count for a client
 * based on its configured RX descriptor count.  The first table entry
 * whose bufCnt bound covers the RX descriptor count wins; otherwise the
 * compile-time defaults remain in effect.
 */
void BnxeInitBdCnts(um_device_t * pUM,
                    int cli_idx)
{
    lm_device_t *      pLM = (lm_device_t *)pUM;
    BnxeHwPageConfig * pCfg;

    /* start from the defaults; a matching table entry overrides them */
    pLM->params.l2_tx_bd_page_cnt[cli_idx]  = BNXE_DEF_TX_BD_PAGE_CNT;
    pLM->params.l2_tx_coal_buf_cnt[cli_idx] = BNXE_DEF_TX_COAL_BUF_CNT;

    for (pCfg = &bnxeHwPageConfigs[0]; pCfg->bufCnt != 0; pCfg++)
    {
        if (pLM->params.l2_rx_desc_cnt[cli_idx] <= pCfg->bufCnt)
        {
            pLM->params.l2_tx_bd_page_cnt[cli_idx]  = pCfg->txBdPageCnt;
            pLM->params.l2_tx_coal_buf_cnt[cli_idx] = pCfg->txCoalBufCnt;
            break;
        }
    }
}
153
154
extern u32_t LOG2(u32_t v);
unsigned long log2_align(unsigned long n);

/*
 * Populate the LM device parameter block from the driver configuration.
 *
 * Called during attach, after BnxeCfgInit() has parsed bnxe.conf.  Sets
 * offload capabilities, per-client ring sizes, connection limits, the ILT
 * client page size, interrupt coalescing mode, and various feature flags.
 * Always returns LM_STATUS_SUCCESS.
 */
lm_status_t mm_get_user_config(lm_device_t * pLM)
{
    um_device_t * pUM = (um_device_t *)pLM;
    u32_t total_size;
    u32_t required_page_size;

    BnxeCfgInit(pUM);

    pLM->params.sw_config = LM_SWCFG_10G;

    /* advertise full IPv4/IPv6 TCP/UDP checksum offload in both directions */
    pLM->params.ofld_cap = (LM_OFFLOAD_TX_IP_CKSUM |
                            LM_OFFLOAD_RX_IP_CKSUM |
                            LM_OFFLOAD_TX_TCP_CKSUM |
                            LM_OFFLOAD_RX_TCP_CKSUM |
                            LM_OFFLOAD_TX_TCP6_CKSUM |
                            LM_OFFLOAD_RX_TCP6_CKSUM |
                            LM_OFFLOAD_TX_UDP_CKSUM |
                            LM_OFFLOAD_RX_UDP_CKSUM |
                            LM_OFFLOAD_TX_UDP6_CKSUM |
                            LM_OFFLOAD_RX_UDP6_CKSUM);

    /* XXX Wake on LAN? */
    //pLM->params.wol_cap = (LM_WAKE_UP_MODE_MAGIC_PACKET | LM_WAKE_UP_MODE_NWUF);

    /* keep the VLAN tag in the mac header when receiving */
    pLM->params.keep_vlan_tag = 1;

    /* set in BnxeIntrInit based on the allocated number of MSIX interrupts */
    //pLM->params.rss_chain_cnt = pUM->devParams.numRings;
    //pLM->params.tss_chain_cnt = pUM->devParams.numRings;

    /* only the NDIS (networking) client gets RX descriptors up front */
    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_NDIS] = pUM->devParams.numRxDesc[LM_CLI_IDX_NDIS];
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_NDIS] = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_NDIS] = 0;

    BnxeInitBdCnts(pUM, LM_CLI_IDX_NDIS);

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_FWD] = 0;
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_FWD] = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_FWD] = 0;

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_ISCSI] = 0;
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_ISCSI] = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_ISCSI] = 0;

    pLM->params.l2_rx_desc_cnt[LM_CLI_IDX_FCOE] = 0;
    pLM->params.l2_tx_bd_page_cnt[LM_CLI_IDX_FCOE] = 0;
    pLM->params.l2_tx_coal_buf_cnt[LM_CLI_IDX_FCOE] = 0;

    /* total connection count: FCoE plus L2; rounded to a power of two */
    pLM->params.max_func_toe_cons = 0;
    pLM->params.max_func_iscsi_cons = 0;
    pLM->params.max_func_rdma_cons = 0;
    pLM->params.max_func_fcoe_cons = pUM->lm_dev.hw_info.max_port_fcoe_conn;
    pLM->params.max_func_connections =
        log2_align(pLM->params.max_func_toe_cons +
                   pLM->params.max_func_rdma_cons +
                   pLM->params.max_func_iscsi_cons +
                   pLM->params.max_func_fcoe_cons +
                   MAX_ETH_CONS);

    /* determine: 1. itl_client_page_size, #context in page*/

    /* based on PCIe block INIT document */

    /* We now need to calculate the page size based on the maximum number of
     * connections supported. Since this property is identical to all ports, and
     * is configured in COMMON registers, we need to use the maximum number of
     * connections in all ports. */

    /* The L2P table is used to map logical addresses to physical ones. There
     * are four clients that use this table. We want to use only the ILT
     * (Internal), we need to calculate the total size required for all clients,
     * divide it by the number of entries in the ILT table and that will give us
     * the page size we want. The following table describes the needs of each of
     * these clients:
     *
     *  HW block(L2P client)  Area name  Size [B]
     *  Searcher              T1         ROUNDUP(LOG2(N)) * 64
     *  Timers                Linear Array  N * 8
     *  QM                    Queues     N * 32 * 4
     *  CDU                   Context    N * S + W * ROUNDUP (N/m) (W=0)
     *
     * N: Number of connections
     * S: Context Size
     * W: Block Waste (not really interesting) we configure the context size to
     *    be a power of 2.
     * m: Number of cids in a block (not really interesting, since W will always
     *    be 0)
     */
    total_size = (pLM->hw_info.max_common_conns *
                  (SEARCHER_TOTAL_MEM_REQUIRED_PER_CON +
                   TIMERS_TOTAL_MEM_REQUIRED_PER_CON +
                   QM_TOTAL_MEM_REQUIRED_PER_CON +
                   pLM->params.context_line_size));

    /*
     * Round the per-ILT-entry size up to a power of two.  NOTE(review):
     * (2 << LOG2(x)) rounds up to the NEXT power of two even for exact
     * powers of two — presumably intentional headroom; confirm against
     * the PCIe block INIT document before changing.
     */
    required_page_size = (total_size / ILT_NUM_PAGE_ENTRIES_PER_FUNC);
    required_page_size = (2 << LOG2(required_page_size));

    if (required_page_size < LM_PAGE_SIZE)
    {
        required_page_size = LM_PAGE_SIZE;
    }

    pLM->params.ilt_client_page_size = required_page_size;
    pLM->params.num_context_in_page = (pLM->params.ilt_client_page_size /
                                       pLM->params.context_line_size);

    if (pUM->devParams.intrCoalesce)
    {
        pLM->params.int_coalesing_mode = LM_INT_COAL_PERIODIC_SYNC;
        pLM->params.int_per_sec_rx_override = pUM->devParams.intrRxPerSec;
        pLM->params.int_per_sec_tx_override = pUM->devParams.intrTxPerSec;
    }
    else
    {
        pLM->params.int_coalesing_mode = LM_INT_COAL_NONE;
    }

    /* dynamic host coalescing disabled on all status blocks */
    pLM->params.enable_dynamic_hc[0] = 0;
    pLM->params.enable_dynamic_hc[1] = 0;
    pLM->params.enable_dynamic_hc[2] = 0;
    pLM->params.enable_dynamic_hc[3] = 0;

    /*
     * l2_fw_flow_ctrl is read from the shmem in MF mode in E2 and above. In
     * all other cases this parameter is read from the driver conf. We also
     * read this parameter from the driver conf in E1.5 MF mode since 57711
     * boot code does not have the struct func_ext_cfg.
     */
    if (((pLM->hw_info.mf_info.mf_mode != MULTI_FUNCTION_SI) &&
         (pLM->hw_info.mf_info.mf_mode != MULTI_FUNCTION_AFEX)) ||
        (CHIP_IS_E1x(pLM)))
    {
        pLM->params.l2_fw_flow_ctrl = (pUM->devParams.l2_fw_flow_ctrl) ? 1 : 0;
    }

    pLM->params.rcv_buffer_offset = BNXE_DMA_RX_OFFSET;

    pLM->params.debug_cap_flags = DEFAULT_DEBUG_CAP_FLAGS_VAL;

    pLM->params.max_fcoe_task = lm_fc_max_fcoe_task_sup(pLM);

    /* enable rate shaping */
    pLM->params.cmng_enable = 1;

    pLM->params.validate_sq_complete = 1;

    return LM_STATUS_SUCCESS;
}
307
308
BnxeIsBarUsed(um_device_t * pUM,int regNumber,offset_t offset,u32_t size)309 static boolean_t BnxeIsBarUsed(um_device_t * pUM,
310 int regNumber,
311 offset_t offset,
312 u32_t size)
313 {
314 BnxeMemRegion * pMem;
315
316 BNXE_LOCK_ENTER_MEM(pUM);
317
318 pMem = (BnxeMemRegion *)d_list_peek_head(&pUM->memRegionList);
319
320 while (pMem)
321 {
322 if ((pMem->regNumber == regNumber) &&
323 (pMem->offset == offset) &&
324 (pMem->size == size))
325 {
326 BNXE_LOCK_EXIT_MEM(pUM);
327 return B_TRUE;
328 }
329
330 pMem = (BnxeMemRegion *)d_list_next_entry(D_LINK_CAST(pMem));
331 }
332
333 BNXE_LOCK_EXIT_MEM(pUM);
334 return B_FALSE;
335 }
336
337
/*
 * Map a device BAR into kernel virtual address space starting at offset 0.
 *
 * 'bar' is the LM-layer zero-based BAR index; the Solaris register number
 * is one higher since regset 0 describes PCI config space:
 *   BAR 0 - size 0         (pci config regs?)
 *   BAR 1 - size 0x800000  (Everest 1/2 LM BAR 0)
 *   BAR 2 - size 0x4000000 (Everest 1 LM BAR 1)
 *           0x800000       (Everest 2 LM BAR 1)
 *   BAR 3 - size 0x10000   (Everest 2 LM BAR 2)
 *
 * On success the mapping is recorded on pUM->memRegionList, the access
 * handle is saved in pLM->vars.reg_handle[bar], and the mapped virtual
 * address is returned.  Returns NULL on any failure.
 */
void * mm_map_io_base(lm_device_t * pLM,
                      lm_address_t  baseAddr,
                      u32_t         size,
                      u8_t          bar)
{
    um_device_t *   pUM = (um_device_t *)pLM;
    BnxeMemRegion * pMem;
    //int numRegs;
    off_t           regSize;
    int             rc;

    bar++; /* convert LM BAR index to Solaris register set number */

    //ddi_dev_nregs(pUM->pDev, &numRegs);

    ddi_dev_regsize(pUM->pDev, bar, &regSize);

    /*
     * Report each failure mode distinctly (the old code lumped both into
     * one misleading "already being used" message that printed regSize).
     */
    if (size > regSize)
    {
        BnxeLogWarn(pUM, "Requested size %d exceeds BAR %d size %d!",
                    size, bar, (int)regSize);
        return NULL;
    }

    if (BnxeIsBarUsed(pUM, bar, 0, size))
    {
        BnxeLogWarn(pUM, "BAR %d at offset %d and size %d is already being used!",
                    bar, 0, size);
        return NULL;
    }

    if ((pMem = kmem_zalloc(sizeof(BnxeMemRegion), KM_NOSLEEP)) == NULL)
    {
        BnxeLogWarn(pUM, "Memory allocation for BAR %d at offset %d and size %d failed!",
                    bar, 0, size);
        return NULL;
    }

    if ((rc = ddi_regs_map_setup(pUM->pDev,
                                 bar,             // bar number
                                 &pMem->pRegAddr,
                                 0,               // region map offset,
                                 size,            // region memory window size (0=all)
                                 &bnxeAccessAttribBAR,
                                 &pMem->regAccess)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to memory map device (BAR=%d, offset=%d, size=%d) (%d)",
                    bar, 0, size, rc);
        kmem_free(pMem, sizeof(BnxeMemRegion));
        return NULL;
    }

    pMem->baseAddr  = baseAddr;
    pMem->regNumber = bar;
    pMem->offset    = 0;
    pMem->size      = size;

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memRegionList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    /* the LM layer indexes reg_handle[] by the original zero-based BAR */
    bar--;
    pLM->vars.reg_handle[bar] = pMem->regAccess;

    return pMem->pRegAddr;
}
405
406
/*
 * Map a window of a device BAR (at the given offset/size) into kernel
 * virtual address space.  See mm_map_io_base() for the LM-to-Solaris
 * BAR numbering.  The resulting access handle is returned to the caller
 * via pRegAccHandle and also recorded, with the mapping, on
 * pUM->memRegionList.  Returns the mapped virtual address, or NULL on
 * any failure.
 */
void * mm_map_io_space_solaris(lm_device_t *      pLM,
                               lm_address_t       physAddr,
                               u8_t               bar,
                               u32_t              offset,
                               u32_t              size,
                               ddi_acc_handle_t * pRegAccHandle)
{
    um_device_t *   pUM = (um_device_t *)pLM;
    BnxeMemRegion * pMem;
    off_t           regSize;
    int             rc;

    /* see bar mapping described in mm_map_io_base above */
    bar++;

    ddi_dev_regsize(pUM->pDev, bar, &regSize);

    /*
     * Report each failure mode distinctly (the old code lumped both into
     * one misleading "already being used" message that printed regSize).
     */
    if (size > regSize)
    {
        BnxeLogWarn(pUM, "Requested size %d exceeds BAR %d size %d!",
                    size, bar, (int)regSize);
        return NULL;
    }

    if (BnxeIsBarUsed(pUM, bar, offset, size))
    {
        BnxeLogWarn(pUM, "BAR %d at offset %d and size %d is already being used!",
                    bar, offset, size);
        return NULL;
    }

    if ((pMem = kmem_zalloc(sizeof(BnxeMemRegion), KM_NOSLEEP)) == NULL)
    {
        BnxeLogWarn(pUM, "Memory allocation for BAR %d at offset %d and size %d failed!",
                    bar, offset, size);
        return NULL;
    }

    if ((rc = ddi_regs_map_setup(pUM->pDev,
                                 bar,    // bar number
                                 &pMem->pRegAddr,
                                 offset, // region map offset,
                                 size,   // region memory window size (0=all)
                                 &bnxeAccessAttribBAR,
                                 pRegAccHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to memory map device (BAR=%d, offset=%d, size=%d) (%d)",
                    bar, offset, size, rc);
        kmem_free(pMem, sizeof(BnxeMemRegion));
        return NULL;
    }

    pMem->baseAddr  = physAddr;
    pMem->regNumber = bar;
    pMem->offset    = offset;
    pMem->size      = size;
    pMem->regAccess = *pRegAccHandle;

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memRegionList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    return pMem->pRegAddr;
}
464
465
/*
 * Undo a mapping created by mm_map_io_base()/mm_map_io_space_solaris().
 * Looks up the region by mapped virtual address and size; if found, the
 * region is removed from the tracking list, the register mapping is
 * released, and the bookkeeping record is freed.  Silently does nothing
 * if no matching region is recorded.
 */
void mm_unmap_io_space(lm_device_t * pLM,
                       void *        pVirtAddr,
                       u32_t         size)
{
    um_device_t *   pUM = (um_device_t *)pLM;
    BnxeMemRegion * pMem;

    BNXE_LOCK_ENTER_MEM(pUM);

    for (pMem = (BnxeMemRegion *)d_list_peek_head(&pUM->memRegionList);
         pMem != NULL;
         pMem = (BnxeMemRegion *)d_list_next_entry(D_LINK_CAST(pMem)))
    {
        if ((pMem->pRegAddr == pVirtAddr) && (pMem->size == size))
        {
            /* found it: unhook, release the mapping, free the record */
            d_list_remove_entry(&pUM->memRegionList, D_LINK_CAST(pMem));
            ddi_regs_map_free(&pMem->regAccess);
            kmem_free(pMem, sizeof(BnxeMemRegion));
            break;
        }
    }

    BNXE_LOCK_EXIT_MEM(pUM);
}
493
494
/*
 * Allocate a tracked block of general-purpose kernel memory.
 *
 * The block is padded with BNXE_MEM_CHECK_LEN guard bytes on each side,
 * filled with the BNXE_MAGIC pattern, so mm_rt_free_mem() can detect
 * header/trailer overruns.  The allocation is recorded on
 * pUM->memBlockList along with the caller's file/line for leak tracking.
 * Note that pMem->size records the padded size (memSize + both guards).
 *
 * Returns a pointer just past the leading guard region, or NULL if
 * either allocation fails.
 */
void * mm_alloc_mem_imp(lm_device_t *       pLM,
                        u32_t               memSize,
                        const char *        sz_file,
                        const unsigned long line,
                        u8_t                cli_idx)
{
    um_device_t * pUM = (um_device_t *)pLM;
    BnxeMemBlock * pMem;
    void * pBuf;
    u32_t * pTmp;
    int i;

    (void)cli_idx;

    if ((pMem = kmem_zalloc(sizeof(BnxeMemBlock), KM_NOSLEEP)) == NULL)
    {
        return NULL;
    }

    /* allocated space for header/trailer checks */
    memSize += (BNXE_MEM_CHECK_LEN * 2);

    MEM_LOG(pUM, "*** MEM: %8u", memSize);

    if ((pBuf = kmem_zalloc(memSize, KM_NOSLEEP)) == NULL)
    {
        BnxeLogWarn(pUM, "Failed to allocate memory");
        kmem_free(pMem, sizeof(BnxeMemBlock));
        return NULL;
    }

    /* fill in the header check (BNXE_MAGIC every 4 bytes) */
    for (i = 0, pTmp = (u32_t *)pBuf;
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        *pTmp = BNXE_MAGIC;
    }

    /* fill in the trailer check */
    for (i = 0, pTmp = (u32_t *)((char *)pBuf + memSize - BNXE_MEM_CHECK_LEN);
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        *pTmp = BNXE_MAGIC;
    }

    pMem->size = memSize;
    pMem->pBuf = pBuf;
    snprintf(pMem->fileName, sizeof(pMem->fileName), "%s", sz_file);
    pMem->fileLine = line;

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memBlockList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    MEM_LOG(pUM, "Allocated %d byte block virt:%p",
            memSize, ((char *)pBuf + BNXE_MEM_CHECK_LEN));

    /* hand back the usable region, just past the leading guard */
    return ((char *)pBuf + BNXE_MEM_CHECK_LEN);
}
556
557
/*
 * Allocate a physically-contiguous, DMA-able buffer with the requested
 * alignment.
 *
 * The per-call alignment overrides dma_attr_align in a copy of the
 * template bnxeDmaPageAttrib (sgllen == 1, so the allocation binds to a
 * single DMA cookie).  The allocation is zeroed, synced for the device,
 * and tracked on pUM->memDmaList with the caller's file/line so
 * mm_rt_free_phys_mem() can later find the handles by virtual address.
 *
 * NOTE(review): the size round-up below assumes 'alignment' is a power
 * of two — confirm all callers satisfy this.
 *
 * Returns the kernel virtual address of the buffer (physical address
 * returned through pPhysAddr), or NULL on any failure.
 */
void * mm_alloc_phys_mem_align_imp(lm_device_t *       pLM,
                                   u32_t               memSize,
                                   lm_address_t *      pPhysAddr,
                                   u32_t               alignment,
                                   u8_t                memType,
                                   const char *        sz_file,
                                   const unsigned long line,
                                   u8_t                cli_idx)
{
    um_device_t * pUM = (um_device_t *)pLM;
    int rc;
    caddr_t pBuf;
    size_t length;
    unsigned int count;
    ddi_dma_attr_t dmaAttrib;
    ddi_dma_handle_t * pDmaHandle;
    ddi_acc_handle_t * pDmaAccHandle;
    ddi_dma_cookie_t cookie;
    BnxeMemDma * pMem;
    size_t size;

    (void)memType;
    (void)cli_idx;

    if (memSize == 0)
    {
        return NULL;
    }

    if ((pMem = kmem_zalloc(sizeof(BnxeMemDma), KM_NOSLEEP)) == NULL)
    {
        return NULL;
    }

    /* copy the template and apply the caller's alignment */
    dmaAttrib = bnxeDmaPageAttrib;
    dmaAttrib.dma_attr_align = alignment;

    pDmaHandle = &pMem->dmaHandle;
    pDmaAccHandle = &pMem->dmaAccHandle;

    /* round the size up to a multiple of the alignment (power of two) */
    size = memSize;
    size += (alignment - 1);
    size &= ~((u32_t)(alignment - 1));

    MEM_LOG(pUM, "*** DMA: %8u (%4d) - %8u", memSize, alignment, size);

    if ((rc = ddi_dma_alloc_handle(pUM->pDev,
                                   &dmaAttrib,
                                   DDI_DMA_DONTWAIT,
                                   (void *)0,
                                   pDmaHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA handle");
        kmem_free(pMem, sizeof(BnxeMemDma));
        return NULL;
    }

    if ((rc = ddi_dma_mem_alloc(*pDmaHandle,
                                size,
                                &bnxeAccessAttribBUF,
                                DDI_DMA_CONSISTENT,
                                DDI_DMA_DONTWAIT,
                                (void *)0,
                                &pBuf,
                                &length,
                                pDmaAccHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA memory");
        ddi_dma_free_handle(pDmaHandle);
        kmem_free(pMem, sizeof(BnxeMemDma));
        return NULL;
    }

    if ((rc = ddi_dma_addr_bind_handle(*pDmaHandle,
                                       (struct as *)0,
                                       pBuf,
                                       length,
                                       DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
                                       DDI_DMA_DONTWAIT,
                                       (void *)0,
                                       &cookie,
                                       &count)) != DDI_DMA_MAPPED)
    {
        BnxeLogWarn(pUM, "Failed to bind DMA address");
        ddi_dma_mem_free(pDmaAccHandle);
        ddi_dma_free_handle(pDmaHandle);
        kmem_free(pMem, sizeof(BnxeMemDma));
        return NULL;
    }

    pPhysAddr->as_u64 = cookie.dmac_laddress;

    /* save the virtual memory address so we can get the dma_handle later */
    pMem->size = memSize;
    pMem->pDmaVirt = pBuf;
    pMem->physAddr = *pPhysAddr;
    snprintf(pMem->fileName, sizeof(pMem->fileName), "%s", sz_file);
    pMem->fileLine = line;

#if 0
    MEM_LOG(pUM, "*** DMA: virt %p / phys 0x%0llx (%d/%d)",
            pBuf, pPhysAddr->as_u64,
            (!((u32_t)pBuf % (u32_t)alignment)) ? 1 : 0,
            (!((u32_t)pPhysAddr->as_ptr % (u32_t)alignment) ? 1 : 0));
#endif

    BNXE_LOCK_ENTER_MEM(pUM);
    d_list_push_head(&pUM->memDmaList, D_LINK_CAST(pMem));
    BNXE_LOCK_EXIT_MEM(pUM);

    MEM_LOG(pUM, "Allocated %d sized DMA block phys:%p virt:%p",
            memSize, pMem->physAddr.as_ptr, pMem->pDmaVirt);

    /* Zero memory! */
    bzero(pBuf, length);

    /* make sure the new contents are flushed back to main memory */
    ddi_dma_sync(*pDmaHandle, 0, length, DDI_DMA_SYNC_FORDEV);

    return pBuf;
}
679
680
/*
 * Allocate DMA-able memory at the driver's default alignment.  Thin
 * wrapper over mm_alloc_phys_mem_align_imp().
 */
void * mm_alloc_phys_mem_imp(lm_device_t *       pLM,
                             u32_t               memSize,
                             lm_address_t *      pPhysAddr,
                             u8_t                memType,
                             const char *        sz_file,
                             const unsigned long line,
                             u8_t                cli_idx)
{
    return mm_alloc_phys_mem_align_imp(pLM,
                                       memSize,
                                       pPhysAddr,
                                       BNXE_DMA_ALIGNMENT, /* default alignment */
                                       memType,
                                       sz_file,
                                       line,
                                       cli_idx);
}
693
694
/*
 * Runtime variant of mm_alloc_mem_imp(); identical semantics on this
 * platform.
 */
void * mm_rt_alloc_mem_imp(lm_device_t *       pDev,
                           u32_t               memSize,
                           const char *        sz_file,
                           const unsigned long line,
                           u8_t                cli_idx)
{
    return mm_alloc_mem_imp(pDev,
                            memSize,
                            sz_file,
                            line,
                            cli_idx);
}
703
704
/*
 * Runtime variant of mm_alloc_phys_mem_imp(); identical semantics on
 * this platform.
 */
void * mm_rt_alloc_phys_mem_imp(lm_device_t *       pDev,
                                u32_t               memSize,
                                lm_address_t *      pPhysAddr,
                                u8_t                flushType,
                                const char *        sz_file,
                                const unsigned long line,
                                u8_t                cli_idx)
{
    return mm_alloc_phys_mem_imp(pDev,
                                 memSize,
                                 pPhysAddr,
                                 flushType,
                                 sz_file,
                                 line,
                                 cli_idx);
}
716
717
mm_get_current_time(lm_device_t * pDev)718 u64_t mm_get_current_time(lm_device_t * pDev)
719 {
720 um_device_t * pUM = (um_device_t *)pDev;
721 BnxeDbgBreakMsg(pUM, "MM_GET_CURRENT_TIME");
722 return 0;
723 }
724
725
/*
 * Free a block allocated by mm_alloc_mem_imp()/mm_rt_alloc_mem_imp().
 *
 * Verifies the BNXE_MAGIC header and trailer guard regions (logging and
 * debug-breaking on overrun), then locates the block on
 * pUM->memBlockList by pointer and padded size and releases both the
 * buffer and its tracking record.  A size mismatch or an unknown
 * pointer leaves the memory untouched (logged for the mismatch case).
 */
void mm_rt_free_mem(lm_device_t * pDev,
                    void *        pBuf,
                    u32_t         memSize,
                    u8_t          cli_idx)
{
    um_device_t * pUM = (um_device_t *)pDev;
    BnxeMemBlock * pMem;
    u32_t * pTmp;
    int i;

    (void)cli_idx;

    BNXE_LOCK_ENTER_MEM(pUM);

    pMem = (BnxeMemBlock *)d_list_peek_head(&pUM->memBlockList);

    /* adjust for header/trailer checks (mirrors mm_alloc_mem_imp) */
    pBuf = ((char *)pBuf - BNXE_MEM_CHECK_LEN);
    memSize += (BNXE_MEM_CHECK_LEN * 2);

    /* verify header check */
    for (i = 0, pTmp = (u32_t *)pBuf;
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        if (*pTmp != BNXE_MAGIC)
        {
            BnxeLogWarn(pUM, "Header overflow! (%p/%u)", pBuf, memSize);
            BnxeDbgBreak(pUM);
        }
    }

    /* verify trailer check */
    for (i = 0, pTmp = (u32_t *)((char *)pBuf + memSize - BNXE_MEM_CHECK_LEN);
         i < BNXE_MEM_CHECK_LEN;
         i += 4, pTmp++)
    {
        if (*pTmp != BNXE_MAGIC)
        {
            BnxeLogWarn(pUM, "Trailer overflow! (%p/%u)", pBuf, memSize);
            BnxeDbgBreak(pUM);
        }
    }

    /* find the tracking record matching this buffer */
    while (pMem)
    {
        if (pBuf == pMem->pBuf)
        {
            if (memSize != pMem->size)
            {
                /* Uh-Oh! */
                BnxeLogWarn(pUM, "Attempt to free memory block with invalid size (%d/%d)",
                            memSize, pMem->size);
                BnxeDbgBreak(pUM);

                BNXE_LOCK_EXIT_MEM(pUM);
                return;
            }

            d_list_remove_entry(&pUM->memBlockList, D_LINK_CAST(pMem));

            kmem_free(pBuf, memSize);
            kmem_free(pMem, sizeof(BnxeMemBlock));

            BNXE_LOCK_EXIT_MEM(pUM);
            return;
        }

        pMem = (BnxeMemBlock *)d_list_next_entry(D_LINK_CAST(pMem));
    }

    BNXE_LOCK_EXIT_MEM(pUM);
}
799
800
/*
 * Free a DMA buffer allocated by mm_alloc_phys_mem_align_imp().
 *
 * Looks the buffer up on pUM->memDmaList by its kernel virtual address,
 * then (in the required order) unbinds the DMA handle, frees the DMA
 * memory, frees the handle, and releases the tracking record.  A size
 * mismatch or an unknown pointer leaves everything untouched (logged
 * for the mismatch case).
 */
void mm_rt_free_phys_mem(lm_device_t * pDev,
                         u32_t         memSize,
                         void *        pBuf,
                         lm_address_t  pPhysAddr,
                         u8_t          cli_idx)
{
    um_device_t * pUM = (um_device_t *)pDev;
    BnxeMemDma * pMem;

    (void)pPhysAddr;
    (void)cli_idx;

    BNXE_LOCK_ENTER_MEM(pUM);

    pMem = (BnxeMemDma *)d_list_peek_head(&pUM->memDmaList);

    while (pMem)
    {
        if (pBuf == pMem->pDmaVirt)
        {
            if (memSize != pMem->size)
            {
                /* Uh-Oh! */
                BnxeLogWarn(pUM, "Attempt to free DMA memory with invalid size (%d/%d)",
                            memSize, pMem->size);
                BnxeDbgBreak(pUM);

                BNXE_LOCK_EXIT_MEM(pUM);
                return;
            }

            d_list_remove_entry(&pUM->memDmaList, D_LINK_CAST(pMem));

            /* teardown must mirror the setup order in the allocator */
            ddi_dma_unbind_handle(pMem->dmaHandle);
            ddi_dma_mem_free(&pMem->dmaAccHandle);
            ddi_dma_free_handle(&pMem->dmaHandle);
            kmem_free(pMem, sizeof(BnxeMemDma));

            BNXE_LOCK_EXIT_MEM(pUM);
            return;
        }

        pMem = (BnxeMemDma *)d_list_next_entry(D_LINK_CAST(pMem));
    }

    BNXE_LOCK_EXIT_MEM(pUM);
}
848
849
/*
 * LM-layer memory fill hook; thin wrapper around the C library memset.
 */
void mm_memset(void * pBuf,
               u8_t   val,
               u32_t  memSize)
{
    (void)memset(pBuf, val, memSize);
}
856
857
/*
 * LM-layer memory copy hook; thin wrapper around the C library memcpy
 * (regions must not overlap).
 */
void mm_memcpy(void *       pDest,
               const void * pSrc,
               u32_t        memSize)
{
    (void)memcpy(pDest, pSrc, memSize);
}
864
865
mm_memcmp(void * pBuf1,void * pBuf2,u32_t count)866 u8_t mm_memcmp(void * pBuf1,
867 void * pBuf2,
868 u32_t count)
869 {
870 return (memcmp(pBuf1, pBuf2, count) == 0) ? 1 : 0;
871 }
872
873
/*
 * Hand a list of completed TX packets on chain 'idx' back to the UM
 * layer for reclaim.
 */
void mm_indicate_tx(lm_device_t * pLM,
                    u32_t         idx,
                    s_list_t *    packet_list)
{
    um_device_t * pUM = (um_device_t *)pLM;

    BnxeTxPktsReclaim(pUM, idx, packet_list);
}
880
881
/*
 * Slow-path (ramrod) completion callback for cid.  No action is needed
 * on this platform; the debug logging variant is compiled out.  The
 * 'cookie' argument is ignored.
 */
void mm_set_done(lm_device_t * pDev,
                 u32_t         cid,
                 void *        cookie)
{
#if 0
    um_device_t * pUM = (um_device_t *)pDev;
    BnxeLogInfo(pUM, "RAMROD on cid %d cmd is done", cid);
#else
    (void)pDev;
    (void)cid;
#endif
}
894
895
/*
 * Release a slow-path queue pending command previously obtained from
 * mm_get_sq_pending_command().
 */
void mm_return_sq_pending_command(lm_device_t *               pDev,
                                  struct sq_pending_command * pPending)
{
    /* XXX probably need a memory pool to pull from... */
    mm_rt_free_mem(pDev,
                   pPending,
                   sizeof(struct sq_pending_command),
                   LM_CLI_IDX_NDIS);
}
903
904
/*
 * Allocate a slow-path queue pending command; returns NULL on failure.
 */
struct sq_pending_command * mm_get_sq_pending_command(lm_device_t * pDev)
{
    /* XXX probably need a memory pool to pull from... */
    return mm_rt_alloc_mem(pDev,
                           sizeof(struct sq_pending_command),
                           LM_CLI_IDX_NDIS);
}
911
912
/*
 * Copy up to 'size' bytes of a TX packet's mblk chain into a flat
 * buffer.  Returns the number of bytes actually copied (may be less
 * than 'size' if the chain is shorter).
 */
u32_t mm_copy_packet_buf(lm_device_t * pDev,
                         lm_packet_t * pLMPkt,
                         u8_t *        pMemBuf,
                         u32_t         size)
{
    um_txpacket_t * pTxPkt = (um_txpacket_t *)pLMPkt;
    mblk_t *        pMblk;
    u32_t           copied = 0;
    u32_t           avail;
    u32_t           chunk;

    (void)pDev;

    /* walk the chain until 'size' bytes are copied or it runs out */
    for (pMblk = pTxPkt->pMblk;
         (size > 0) && (pMblk != NULL);
         pMblk = pMblk->b_cont)
    {
        avail = (pMblk->b_wptr - pMblk->b_rptr);
        chunk = (avail <= size) ? avail : size;

        bcopy(pMblk->b_rptr, pMemBuf, chunk);

        pMemBuf += chunk;
        copied  += chunk;
        size    -= chunk;
    }

    return copied;
}
944
945
mm_fan_failure(lm_device_t * pDev)946 lm_status_t mm_fan_failure(lm_device_t * pDev)
947 {
948 um_device_t * pUM = (um_device_t *)pDev;
949 BnxeLogWarn(pUM, "FAN FAILURE!");
950 return LM_STATUS_SUCCESS;
951 }
952
953
/*
 * Record and log a link state change.
 *
 * On link down, all cached link and link-partner properties are reset.
 * On link up, duplex, RX/TX pause, and speed are decoded from 'medium'
 * into pUM->props and a one-line summary is logged.  Speeds in the
 * LM_MEDIUM_SPEED_SEQ range are decoded arithmetically (each step above
 * SEQ_START is another 100 Mbps).
 */
static void BnxeLinkStatus(um_device_t * pUM,
                           lm_status_t   link,
                           lm_medium_t   medium)
{
#define TBUF_SIZE 64
    char tbuf[TBUF_SIZE]; /* scratch for sequence-range speed strings */
    char * pDuplex;
    char * pRxFlow;
    char * pTxFlow;
    char * pSpeed;

    if (link != LM_STATUS_LINK_ACTIVE)
    {
        /* reset the link status */
        pUM->props.link_speed   = 0;
        pUM->props.link_duplex  = B_FALSE;
        pUM->props.link_txpause = B_FALSE;
        pUM->props.link_rxpause = B_FALSE;
        pUM->props.uptime       = 0;

        /* reset the link partner status */
        pUM->remote.link_autoneg   = B_FALSE;
        pUM->remote.param_20000fdx = B_FALSE;
        pUM->remote.param_10000fdx = B_FALSE;
        pUM->remote.param_2500fdx  = B_FALSE;
        pUM->remote.param_1000fdx  = B_FALSE;
        pUM->remote.param_100fdx   = B_FALSE;
        pUM->remote.param_100hdx   = B_FALSE;
        pUM->remote.param_10fdx    = B_FALSE;
        pUM->remote.param_10hdx    = B_FALSE;
        pUM->remote.param_txpause  = B_FALSE;
        pUM->remote.param_rxpause  = B_FALSE;

        BnxeLogInfo(pUM, "Link Down");
        return;
    }

    /* link is up: stamp the time and decode duplex/pause/speed */
    pUM->props.uptime = ddi_get_time();

    if (GET_MEDIUM_DUPLEX(medium) == LM_MEDIUM_HALF_DUPLEX)
    {
        pDuplex = "Half";
        pUM->props.link_duplex = B_FALSE;
    }
    else
    {
        pDuplex = "Full";
        pUM->props.link_duplex = B_TRUE;
    }

    if (pUM->lm_dev.vars.flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE)
    {
        pRxFlow = "ON";
        pUM->props.link_rxpause = B_TRUE;
    }
    else
    {
        pRxFlow = "OFF";
        pUM->props.link_rxpause = B_FALSE;
    }

    if (pUM->lm_dev.vars.flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
    {
        pTxFlow = "ON";
        pUM->props.link_txpause = B_TRUE;
    }
    else
    {
        pTxFlow = "OFF";
        pUM->props.link_txpause = B_FALSE;
    }

#if 0
    if (pUM->curcfg.lnkcfg.link_autoneg == B_TRUE)
    {
        BnxeUpdateLpCap(pUM);
    }
#endif

    switch (GET_MEDIUM_SPEED(medium))
    {
    case LM_MEDIUM_SPEED_10MBPS:

        pUM->props.link_speed = 10;
        pSpeed = "10Mb";
        break;

    case LM_MEDIUM_SPEED_100MBPS:

        pUM->props.link_speed = 100;
        pSpeed = "100Mb";
        break;

    case LM_MEDIUM_SPEED_1000MBPS:

        pUM->props.link_speed = 1000;
        pSpeed = "1Gb";
        break;

    case LM_MEDIUM_SPEED_2500MBPS:

        pUM->props.link_speed = 2500;
        pSpeed = "2.5Gb";
        break;

    case LM_MEDIUM_SPEED_10GBPS:

        pUM->props.link_speed = 10000;
        pSpeed = "10Gb";
        break;

    case LM_MEDIUM_SPEED_12GBPS:

        pUM->props.link_speed = 12000;
        pSpeed = "12Gb";
        break;

    case LM_MEDIUM_SPEED_12_5GBPS:

        pUM->props.link_speed = 12500;
        pSpeed = "12.5Gb";
        break;

    case LM_MEDIUM_SPEED_13GBPS:

        pUM->props.link_speed = 13000;
        pSpeed = "13Gb";
        break;

    case LM_MEDIUM_SPEED_15GBPS:

        pUM->props.link_speed = 15000;
        pSpeed = "15Gb";
        break;

    case LM_MEDIUM_SPEED_16GBPS:

        pUM->props.link_speed = 16000;
        pSpeed = "16Gb";
        break;

    case LM_MEDIUM_SPEED_20GBPS:

        pUM->props.link_speed = 20000;
        pSpeed = "20Gb";
        break;

    default:

        /* sequence range: speed = (step index + 1) * 100 Mbps */
        if ((GET_MEDIUM_SPEED(medium) >= LM_MEDIUM_SPEED_SEQ_START) &&
            (GET_MEDIUM_SPEED(medium) <= LM_MEDIUM_SPEED_SEQ_END))
        {
            pUM->props.link_speed = (((GET_MEDIUM_SPEED(medium) >> 8) -
                                      (LM_MEDIUM_SPEED_SEQ_START >> 8) +
                                      1) * 100);
            snprintf(tbuf, TBUF_SIZE, "%u", pUM->props.link_speed);
            pSpeed = tbuf;
            break;
        }

        /* unknown speed: report link up without a speed string */
        pUM->props.link_speed = 0;
        pSpeed = "";

        break;
    }

    if (*pSpeed == 0)
    {
        BnxeLogInfo(pUM, "%s Duplex Rx Flow %s Tx Flow %s Link Up",
                    pDuplex, pRxFlow, pTxFlow);
    }
    else
    {
        BnxeLogInfo(pUM, "%s %s Duplex Rx Flow %s Tx Flow %s Link Up",
                    pSpeed, pDuplex, pRxFlow, pTxFlow);
    }
}
1131
1132
/*
 * LM-layer link indication callback.
 *
 * Filters out duplicate indications, updates the cached link state via
 * BnxeLinkStatus(), then propagates the new state to any bound clients:
 * GLDv3 (networking) and, if bound, the FCoE client via its cliCtl
 * callback.
 */
void mm_indicate_link(lm_device_t * pLM,
                      lm_status_t   link,
                      lm_medium_t   medium)
{
    um_device_t * pUM = (um_device_t *)pLM;

    /* ignore link status if it has not changed since the last indicate */
    if ((pUM->devParams.lastIndLink == link) &&
        (pUM->devParams.lastIndMedium == medium))
    {
        return;
    }

    pUM->devParams.lastIndLink   = link;
    pUM->devParams.lastIndMedium = medium;

    BnxeLinkStatus(pUM, link, medium);

    if (CLIENT_BOUND(pUM, LM_CLI_IDX_NDIS))
    {
        BnxeGldLink(pUM, (link == LM_STATUS_LINK_ACTIVE) ?
                             LINK_STATE_UP : LINK_STATE_DOWN);
    }

    if (CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE))
    {
        if (pUM->fcoe.pDev == NULL)
        {
            BnxeLogWarn(pUM, "FCoE Client bound and pDev is NULL (LINK STATUS failed!) %s@%s",
                        BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
        }
        else if (pUM->fcoe.bind.cliCtl == NULL)
        {
            BnxeLogWarn(pUM, "FCoE Client bound and cliCtl is NULL (LINK STATUS failed!) %s@%s",
                        BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
        }
        else
        {
            pUM->fcoe.bind.cliCtl(pUM->fcoe.pDev,
                                  (link == LM_STATUS_LINK_ACTIVE) ?
                                      CLI_CTL_LINK_UP : CLI_CTL_LINK_DOWN,
                                  NULL,
                                  0);
        }
    }
}
1179
1180
mm_schedule_task(lm_device_t * pDev,u32_t delay_ms,lm_task_cb_t task,void * param)1181 lm_status_t mm_schedule_task(lm_device_t * pDev,
1182 u32_t delay_ms,
1183 lm_task_cb_t task,
1184 void * param)
1185 {
1186 um_device_t * pUM = (um_device_t *)pDev;
1187
1188 BnxeWorkQueueAddDelayNoCopy(pUM, (void (*)(um_device_t *, void *))task, param, delay_ms);
1189
1190 return LM_STATUS_SUCCESS;
1191 }
1192
1193
mm_register_lpme(lm_device_t * pDev,lm_generic_workitem_function * func,u8_t b_fw_access,u8_t b_queue_for_fw)1194 lm_status_t mm_register_lpme(lm_device_t * pDev,
1195 lm_generic_workitem_function * func,
1196 u8_t b_fw_access,
1197 u8_t b_queue_for_fw)
1198 {
1199 um_device_t * pUM = (um_device_t *)pDev;
1200
1201 (void)b_fw_access;
1202 (void)b_queue_for_fw;
1203
1204 BnxeWorkQueueAddGeneric(pUM, (void (*)(um_device_t *))func);
1205
1206 return LM_STATUS_SUCCESS;
1207 }
1208
1209
/* LM shim: take the slow-path queue lock for this device. */
void MM_ACQUIRE_SPQ_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_SPQ(pUM);
}
1214
1215
/* LM shim: drop the slow-path queue lock for this device. */
void MM_RELEASE_SPQ_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_SPQ(pUM);
}
1220
1221
/* LM shim: DPC-context variant; uses the same SPQ lock as the non-DPC path. */
void MM_ACQUIRE_SPQ_LOCK_DPC_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_SPQ(pUM);
}
1226
1227
/* LM shim: DPC-context variant; releases the same SPQ lock as the non-DPC path. */
void MM_RELEASE_SPQ_LOCK_DPC_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_SPQ(pUM);
}
1232
1233
/* LM shim: take the connection-ID (CID) lock for this device. */
void MM_ACQUIRE_CID_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_CID(pUM);
}
1238
1239
/* LM shim: drop the connection-ID (CID) lock for this device. */
void MM_RELEASE_CID_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_CID(pUM);
}
1244
1245
/* LM shim: take the ramrod-request lock for this device. */
void MM_ACQUIRE_REQUEST_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_RRREQ(pUM);
}
1250
1251
/* LM shim: drop the ramrod-request lock for this device. */
void MM_RELEASE_REQUEST_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_RRREQ(pUM);
}
1256
1257
/* LM shim: take the PHY lock for this device. */
void MM_ACQUIRE_PHY_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_PHY(pUM);
}
1262
1263
/* LM shim: drop the PHY lock for this device. */
void MM_RELEASE_PHY_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_PHY(pUM);
}
1268
1269
/* LM shim: DPC-context variant; uses the same PHY lock as the non-DPC path. */
void MM_ACQUIRE_PHY_LOCK_DPC_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_PHY(pUM);
}
1274
1275
/* LM shim: DPC-context variant; releases the same PHY lock as the non-DPC path. */
void MM_RELEASE_PHY_LOCK_DPC_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_PHY(pUM);
}
1280
1281
/*
 * LM shim: initialize an LM spinlock as a kernel mutex at the
 * device's interrupt priority so it is safe to take from the
 * interrupt path.
 */
void mm_init_lock(lm_device_t *    pDev,
                  mm_spin_lock_t * spinlock)
{
    um_device_t * pUM = (um_device_t *)pDev;

    mutex_init(spinlock, NULL, MUTEX_DRIVER,
               DDI_INTR_PRI(pUM->intrPriority));
}
1290
1291
mm_acquire_lock(mm_spin_lock_t * spinlock)1292 lm_status_t mm_acquire_lock(mm_spin_lock_t * spinlock)
1293 {
1294 if (spinlock == NULL)
1295 {
1296 return LM_STATUS_INVALID_PARAMETER;
1297 }
1298
1299 mutex_enter(spinlock);
1300
1301 return LM_STATUS_SUCCESS;
1302 }
1303
1304
mm_release_lock(mm_spin_lock_t * spinlock)1305 lm_status_t mm_release_lock(mm_spin_lock_t * spinlock)
1306 {
1307 if (spinlock == NULL)
1308 {
1309 return LM_STATUS_INVALID_PARAMETER;
1310 }
1311
1312 mutex_exit(spinlock);
1313
1314 return LM_STATUS_SUCCESS;
1315 }
1316
1317
/* LM shim: take the MCP (management firmware) lock for this device. */
void MM_ACQUIRE_MCP_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_MCP(pUM);
}
1322
1323
/* LM shim: drop the MCP (management firmware) lock for this device. */
void MM_RELEASE_MCP_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_MCP(pUM);
}
1328
1329
/* LM shim: take the isles-control lock for this device. */
void MM_ACQUIRE_ISLES_CONTROL_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_ISLES_CONTROL(pUM);
}
1334
1335
/* LM shim: drop the isles-control lock for this device. */
void MM_RELEASE_ISLES_CONTROL_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_ISLES_CONTROL(pUM);
}
1340
1341
/* LM shim: DPC-context variant; uses the same isles-control lock. */
void MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_ISLES_CONTROL(pUM);
}
1346
1347
/* LM shim: DPC-context variant; releases the same isles-control lock. */
void MM_RELEASE_ISLES_CONTROL_LOCK_DPC_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_ISLES_CONTROL(pUM);
}
1352
1353
/* LM shim: take the indirect-register access lock for this device. */
void MM_ACQUIRE_IND_REG_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_IND(pUM);
}
1358
1359
/* LM shim: drop the indirect-register access lock for this device. */
void MM_RELEASE_IND_REG_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_IND(pUM);
}
1364
1365
/*
 * LM shim: take the driver-global loader lock (bnxeLoaderMutex) —
 * shared across all bnxe instances, hence no device argument.
 */
void MM_ACQUIRE_LOADER_LOCK_IMP()
{
    mutex_enter(&bnxeLoaderMutex);
}
1370
1371
/*
 * LM shim: drop the driver-global loader lock (bnxeLoaderMutex) —
 * shared across all bnxe instances, hence no device argument.
 */
void MM_RELEASE_LOADER_LOCK_IMP()
{
    mutex_exit(&bnxeLoaderMutex);
}
1376
1377
/* LM shim: take the slow-path request manager lock for this device. */
void MM_ACQUIRE_SP_REQ_MGR_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_SPREQ(pUM);
}
1382
1383
/* LM shim: drop the slow-path request manager lock for this device. */
void MM_RELEASE_SP_REQ_MGR_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_SPREQ(pUM);
}
1388
1389
/* LM shim: take the per-status-block lock selected by sb_idx. */
void MM_ACQUIRE_SB_LOCK_IMP(lm_device_t * pDev, u8_t sb_idx)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_SB(pUM, sb_idx);
}
1394
1395
/* LM shim: drop the per-status-block lock selected by sb_idx. */
void MM_RELEASE_SB_LOCK_IMP(lm_device_t * pDev, u8_t sb_idx)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_SB(pUM, sb_idx);
}
1400
1401
/* LM shim: take the Ethernet-connection lock for this device. */
void MM_ACQUIRE_ETH_CON_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_ENTER_ETH_CON(pUM);
}
1406
1407
/* LM shim: drop the Ethernet-connection lock for this device. */
void MM_RELEASE_ETH_CON_LOCK_IMP(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BNXE_LOCK_EXIT_ETH_CON(pUM);
}
1412
1413
/*
 * Stub: always returns 0; no CRC32 is computed and the arguments are
 * ignored. NOTE(review): presumably no LM code path on this platform
 * consumes a real CRC from here — confirm before relying on it.
 */
unsigned int mm_crc32(unsigned char * address,
                      unsigned int    size,
                      unsigned int    crc)
{
    return 0;
}
1420
1421
/*
 * Stub: always returns 0; no CRC16 is computed and the arguments are
 * ignored. NOTE(review): presumably no LM code path on this platform
 * consumes a real CRC from here — confirm before relying on it.
 */
unsigned short mm_crc16(unsigned char * address,
                        unsigned int    size,
                        unsigned short  crc)
{
    return 0;
}
1428
1429
/*
 * Dispatch an LM-layer event log to the driver's logging facility.
 * The variadic arguments in 'argp' are consumed according to the
 * event type (see the per-case comments). Always returns
 * LM_STATUS_SUCCESS, even for unrecognized event ids.
 */
lm_status_t mm_event_log_generic_arg_fwd(lm_device_t * pDev,
                                         const lm_log_id_t lm_log_id,
                                         va_list argp)
{
    um_device_t * pUM = (um_device_t *)pDev;
    u8_t port = 0 ;
    char * sz_vendor_name = NULL;
    char * sz_vendor_pn = NULL;

    switch (lm_log_id)
    {
    case LM_LOG_ID_FAN_FAILURE: // fan failure detected

        BnxeLogWarn(pUM, "FAN FAILURE!");
        break;

    case LM_LOG_ID_UNQUAL_IO_MODULE: // SFP+ unqualified io module
        /*
         * expected parameters:
         *   u8 port, const char * vendor_name, const char * vendor_pn
         * (u8 is fetched as int: default argument promotion)
         */
        port           = va_arg(argp, int);
        sz_vendor_name = va_arg(argp, char*);
        sz_vendor_pn   = va_arg(argp, char*);

        BnxeLogInfo(pUM, "Unqualified IO Module: %s %s (port=%d)",
                    sz_vendor_name, sz_vendor_pn, port);
        break;

    case LM_LOG_ID_OVER_CURRENT: // SFP+ over current power
        /*
         * expected parameters:
         *   u8 port
         */
        port = va_arg(argp, int);

        BnxeLogWarn(pUM, "SFP+ over current, power failure! (port=%d)", port);
        break;

    case LM_LOG_ID_NO_10G_SUPPORT: // 10g speed is requested but not supported
        /*
         * expected parameters:
         *   u8 port
         */
        port = va_arg(argp, int);

        BnxeLogWarn(pUM, "10Gb speed not supported! (port=%d)", port);
        break;

    case LM_LOG_ID_PHY_UNINITIALIZED:
        /*
         * expected parameters:
         *   u8 port
         */
        port = va_arg(argp, int);

        BnxeLogWarn(pUM, "PHY uninitialized! (port=%d)", port);
        break;

    case LM_LOG_ID_MDIO_ACCESS_TIMEOUT:

/*
 * Map path/port ids to an external port number; in 4-port mode each
 * path contributes two ports, otherwise path and port ids are summed.
 */
#define MM_PORT_NUM(pdev)                       \
        (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) ?  \
            (PATH_ID(pdev) + (2 * PORT_ID(pdev)))     :  \
            (PATH_ID(pdev) + PORT_ID(pdev))

        port = MM_PORT_NUM(&pUM->lm_dev);

        BnxeLogWarn(pUM, "MDIO access timeout! (port=%d)", port);
        break;

    default:

        BnxeLogWarn(pUM, "Unknown MM event log! (type=%d)", lm_log_id);
        break;
    }

    return LM_STATUS_SUCCESS;
}
1509
1510
mm_event_log_generic(lm_device_t * pDev,const lm_log_id_t lm_log_id,...)1511 lm_status_t mm_event_log_generic(lm_device_t * pDev,
1512 const lm_log_id_t lm_log_id,
1513 ...)
1514 {
1515 lm_status_t lm_status = LM_STATUS_SUCCESS;
1516 va_list argp;
1517
1518 va_start(argp, lm_log_id);
1519 lm_status = mm_event_log_generic_arg_fwd(pDev, lm_log_id, argp);
1520 va_end(argp);
1521
1522 return lm_status;
1523 }
1524
1525
mm_build_ver_string(lm_device_t * pDev)1526 u32_t mm_build_ver_string(lm_device_t * pDev)
1527 {
1528 um_device_t * pUM = (um_device_t *)pDev;
1529
1530 snprintf((char *)pDev->ver_str,
1531 sizeof(pDev->ver_str),
1532 "%s",
1533 pUM->version);
1534
1535 return min(strlen((char *)pDev->ver_str), strlen(pUM->version));
1536 }
1537
1538
/*
 * LM callback invoked on a hardware failure indication; this
 * implementation only logs a warning — no recovery is attempted here.
 */
void mm_indicate_hw_failure(lm_device_t * pDev)
{
    um_device_t * pUM = (um_device_t *)pDev;

    BnxeLogWarn(pUM, "HW failure indicated!");
}
1545
1546
/*
 * Read one byte from the mapped BAR at 'offset' into *ret, with a
 * read barrier beforehand to order it against prior loads.
 */
void mm_bar_read_byte(struct _lm_device_t *pdev,
                      u8_t                 bar,
                      u32_t                offset,
                      u8_t                *ret)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    mm_read_barrier();
    *ret = ddi_get8(pdev->vars.reg_handle[bar], (uint8_t *)(base + offset));
}
1557
1558
/*
 * Read one 16-bit word from the mapped BAR at 'offset' into *ret,
 * with a read barrier beforehand to order it against prior loads.
 */
void mm_bar_read_word(struct _lm_device_t *pdev,
                      u8_t                 bar,
                      u32_t                offset,
                      u16_t               *ret)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    mm_read_barrier();
    *ret = ddi_get16(pdev->vars.reg_handle[bar], (uint16_t *)(base + offset));
}
1569
1570
/*
 * Read one 32-bit dword from the mapped BAR at 'offset' into *ret,
 * with a read barrier beforehand to order it against prior loads.
 */
void mm_bar_read_dword(struct _lm_device_t *pdev,
                       u8_t                 bar,
                       u32_t                offset,
                       u32_t               *ret)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    mm_read_barrier();
    *ret = ddi_get32(pdev->vars.reg_handle[bar], (uint32_t *)(base + offset));
}
1581
1582
/*
 * Read one 64-bit value from the mapped BAR at 'offset' into *ret,
 * with a read barrier beforehand to order it against prior loads.
 */
void mm_bar_read_ddword(struct _lm_device_t *pdev,
                        u8_t                 bar,
                        u32_t                offset,
                        u64_t               *ret)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    mm_read_barrier();
    *ret = ddi_get64(pdev->vars.reg_handle[bar], (uint64_t *)(base + offset));
}
1593
1594
/*
 * Write one byte to the mapped BAR at 'offset', followed by a write
 * barrier to order it before subsequent stores.
 */
void mm_bar_write_byte(struct _lm_device_t *pdev,
                       u8_t                 bar,
                       u32_t                offset,
                       u8_t                 val)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    ddi_put8(pdev->vars.reg_handle[bar], (uint8_t *)(base + offset), val);
    mm_write_barrier();
}
1605
1606
/*
 * Write one 16-bit word to the mapped BAR at 'offset', followed by a
 * write barrier to order it before subsequent stores.
 */
void mm_bar_write_word(struct _lm_device_t *pdev,
                       u8_t                 bar,
                       u32_t                offset,
                       u16_t                val)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    ddi_put16(pdev->vars.reg_handle[bar], (uint16_t *)(base + offset), val);
    mm_write_barrier();
}
1617
1618
/*
 * Write one 32-bit dword to the mapped BAR at 'offset', followed by a
 * write barrier to order it before subsequent stores.
 */
void mm_bar_write_dword(struct _lm_device_t *pdev,
                        u8_t                 bar,
                        u32_t                offset,
                        u32_t                val)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    ddi_put32(pdev->vars.reg_handle[bar], (uint32_t *)(base + offset), val);
    mm_write_barrier();
}
1629
1630
/*
 * Write one 64-bit value to the mapped BAR at 'offset', followed by a
 * write barrier to order it before subsequent stores.
 */
void mm_bar_write_ddword(struct _lm_device_t *pdev,
                         u8_t                 bar,
                         u32_t                offset,
                         u64_t                val)
{
    caddr_t base = (caddr_t)pdev->vars.mapped_bar_addr[bar];

    ddi_put64(pdev->vars.reg_handle[bar], (uint64_t *)(base + offset), val);
    mm_write_barrier();
}
1641
1642
/*
 * Copy 'size' 32-bit dwords from buf_ptr into the mapped BAR starting
 * at 'offset' (advancing 4 bytes per element). No write barrier is
 * issued here; callers order the writes themselves if needed.
 */
void mm_bar_copy_buffer(struct _lm_device_t * pdev,
                        u8_t                  bar,
                        u32_t                 offset,
                        u32_t                 size,
                        u32_t                *buf_ptr)
{
    ddi_acc_handle_t handle = pdev->vars.reg_handle[bar];
    caddr_t          base   = (caddr_t)pdev->vars.mapped_bar_addr[bar];
    u32_t            idx;

    for (idx = 0; idx < size; idx++)
    {
        ddi_put32(handle,
                  (uint32_t *)(base + offset + (idx * 4)),
                  buf_ptr[idx]);
    }
}
1659
1660
/*
 * Walk the PCI capability list looking for 'capabilityID'.
 *
 * Returns the config-space offset of the matching capability on
 * success, 0xFFFFFFFF when the capability pointer is invalid or a
 * config read fails, and 0 when the list terminates without a match
 * (next-pointer of 0) — callers must treat both as "not found".
 *
 * Fix: the '&' in the mm_read_pci() argument had been corrupted into
 * a stray '®' character ("®_value"), which does not compile; restored
 * to '&reg_value'.
 */
u32_t mm_get_cap_offset(struct _lm_device_t * pdev,
                        u32_t                 capabilityID)
{
    u32_t cap_offset = PCI_CAPABILITY_LIST; /* CapPtr offset */
    u8_t cap_id;
    u32_t reg_value = 0;
    lm_status_t lm_status;

    /* Fetch the capability pointer; low byte is the first cap offset. */
    lm_status = mm_read_pci(pdev, cap_offset, &reg_value);
    if ((lm_status == LM_STATUS_SUCCESS) && (reg_value != 0xFFFFFFFF)) {
        cap_offset = (u8_t)(reg_value & 0x000000FF);
        /* Offset 0 means no list; >= 0x100 is outside legacy config space. */
        if ((cap_offset == 0) || (cap_offset >= 0x100)) {
            return 0xFFFFFFFF;
        }
    } else {
        return 0xFFFFFFFF;
    }
    do {
        reg_value = 0;
        lm_status = mm_read_pci(pdev, cap_offset, &reg_value);
        if ((lm_status == LM_STATUS_SUCCESS) && (reg_value != 0xFFFFFFFF)) {
            /* Each entry: byte 0 = cap id, byte 1 = next pointer. */
            cap_id = (u8_t)(reg_value & 0x000000FF);
            if (cap_id == capabilityID) {
                break;
            }
            cap_offset = (reg_value & 0x0000FF00) >> 8;
            if (cap_offset == 0) {
                break; /* end of list; not found */
            }
        } else {
            cap_offset = 0xFFFFFFFF;
            break;
        }
    } while ((lm_status == LM_STATUS_SUCCESS));

    return cap_offset;
}
1697
/*
 * Wake-on-LAN configuration query: this platform reports no WoL
 * support (always LM_WAKE_UP_MODE_NONE).
 */
u32_t mm_get_wol_flags(struct _lm_device_t * pdev)
{
    return LM_WAKE_UP_MODE_NONE;
}
1702
/*
 * Optional-feature query for the LM layer: no extra feature flags are
 * advertised on this platform (always 0).
 */
u32_t mm_get_feature_flags(struct _lm_device_t * pdev)
{
    return 0;
}
1707
/*
 * Virtual machine queue (VMQ) count query: VMQ is not supported on
 * this platform (always 0).
 */
u32_t mm_get_vmq_cnt(struct _lm_device_t * pdev)
{
    return 0;
}
1712
/*
 * Stub: no I2C state is refreshed here; unconditionally reports
 * LM_STATUS_SUCCESS.
 */
lm_status_t mm_i2c_update(struct _lm_device_t * pdev)
{
    return LM_STATUS_SUCCESS;
}
1717
/*
 * Stub: always reports time 0. NOTE(review): presumably no LM
 * consumer on this platform needs a real timestamp — confirm before
 * relying on this value.
 */
u64_t mm_query_system_time(void)
{
    return 0;
}
1722
1723