xref: /illumos-gate/usr/src/uts/common/io/qede/qede_osal.c (revision 09a032ce55a4b25e9a50eba798b5dfa1f449cb4b)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1,  (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1,  (the "License").
26 
27 * You may not use this file except in compliance with the License.
28 
29 * You can obtain a copy of the License
30 * at http://opensource.org/licenses/CDDL-1.0
31 
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35 
36 /*
37  * Copyright 2023 Oxide Computer Company
38  */
39 
40 #include "qede.h"
41 #include <sys/pci.h>
42 #include <sys/pcie.h>
43 extern ddi_dma_attr_t qede_gen_buf_dma_attr;
44 extern struct ddi_device_acc_attr qede_desc_acc_attr;
45 
46 /*
47  * Find the dma_handle corresponding to the tx, rx data structures
48  */
49 int
50 qede_osal_find_dma_handle_for_block(qede_t *qede, void *addr,
51     ddi_dma_handle_t *dma_handle)
52 {
53 	qede_phys_mem_entry_t *entry;
54 	int ret = DDI_FAILURE;
55 
56 	mutex_enter(&qede->phys_mem_list.lock);
57 	QEDE_LIST_FOR_EACH_ENTRY(entry,
58 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
59 	    &qede->phys_mem_list.head,
60 	    qede_phys_mem_entry_t,
61 	    list_entry) {
62 		if (entry->paddr == addr) {
63 			*dma_handle = entry->dma_handle;
64 			ret = DDI_SUCCESS;
65 			break;
66 		}
67 	}
68 
69 	mutex_exit(&qede->phys_mem_list.lock);
70 
71 	return (ret);
72 }
73 
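/*
 * Sync a tracked DMA block: for device access when is_post is false, or
 * for kernel access (after the device is done with it) when is_post is true.
 */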
74 void
75 qede_osal_dma_sync(struct ecore_dev *edev, void *addr, u32 size, bool is_post)
76 {
77 	qede_t *qede = (qede_t *)edev;
78 	qede_phys_mem_entry_t *entry;
79 	ddi_dma_handle_t *dma_handle = NULL;
80 	uint_t type = (is_post == false) ? DDI_DMA_SYNC_FORDEV :
81 	    DDI_DMA_SYNC_FORKERNEL;
82 
83 	mutex_enter(&qede->phys_mem_list.lock);
84 
85 	/* LINTED E_BAD_PTR_CAST_ALIGN */
86 	QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
87 	    qede_phys_mem_entry_t, list_entry) {
88 		if (entry->paddr == addr) {
89 			dma_handle = &entry->dma_handle;
90 		}
91 	}
92 
93 	if (dma_handle == NULL) {
94 		qede_print_err("!%s(%d): addr %p not found in list",
95 		    __func__, qede->instance, addr);
96 		mutex_exit(&qede->phys_mem_list.lock);
97 		return;
98 	} else {
99 		(void) ddi_dma_sync(*dma_handle,
100 		    0 /* offset into the mem block */,
101 		    size, type);
102 	}
103 
104 	mutex_exit(&qede->phys_mem_list.lock);
105 }
106 
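/*
 * OSAL memory allocators used by ecore. Every allocation is recorded on
 * qede->mem_list so that qede_osal_free() can recover its size and
 * qede_osal_cleanup() can reclaim anything left behind at unload time.
 */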
107 void *
108 qede_osal_zalloc(struct ecore_dev *edev, int flags, size_t size)
109 {
110 	qede_t *qede = (qede_t *)edev;
111 	qede_mem_list_entry_t *new_entry;
112 	void *buf;
113 
114 	if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
115 	    == NULL) {
116 		qede_print_err("%s(%d): Failed to alloc new list entry",
117 		    __func__, qede->instance);
118 		return (NULL);
119 	}
120 
121 	if ((buf = kmem_zalloc(size, flags)) == NULL) {
122 		qede_print_err("%s(%d): Failed to alloc mem, size %lu",
123 		    __func__, qede->instance, size);
124 		kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
125 		return (NULL);
126 	}
127 
128 	new_entry->size = size;
129 	new_entry->buf = buf;
130 
131 	mutex_enter(&qede->mem_list.mem_list_lock);
132 	QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
133 	mutex_exit(&qede->mem_list.mem_list_lock);
134 
135 	return (buf);
136 }
137 
138 
139 void *
140 qede_osal_alloc(struct ecore_dev *edev, int flags, size_t size)
141 {
142 	qede_t *qede = (qede_t *)edev;
143 	qede_mem_list_entry_t *new_entry;
144 	void *buf;
145 
146 	if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
147 	    == NULL) {
148 		qede_print_err("%s(%d): Failed to alloc new list entry",
149 		    __func__, qede->instance);
150 		return (NULL);
151 	}
152 
153 	if ((buf = kmem_alloc(size, flags)) == NULL) {
154 		qede_print_err("%s(%d): Failed to alloc %lu bytes",
155 		    __func__, qede->instance, size);
156 		kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
157 		return (NULL);
158 	}
159 
160 	new_entry->size = size;
161 	new_entry->buf = buf;
162 
163 	mutex_enter(&qede->mem_list.mem_list_lock);
164 	QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
165 	mutex_exit(&qede->mem_list.mem_list_lock);
166 
167 	return (buf);
168 }
169 
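/*
 * Free a buffer obtained from qede_osal_zalloc()/qede_osal_alloc(); the
 * size to free is looked up in the tracking list.
 */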
170 void
171 qede_osal_free(struct ecore_dev *edev, void *addr)
172 {
173 	qede_t *qede = (qede_t *)edev;
174 	qede_mem_list_entry_t *mem_entry;
175 
176 	mutex_enter(&qede->mem_list.mem_list_lock);
177 
178 	/* LINTED E_BAD_PTR_CAST_ALIGN */
179 	QEDE_LIST_FOR_EACH_ENTRY(mem_entry, &qede->mem_list.mem_list_head,
180 	    qede_mem_list_entry_t, mem_entry) {
181 		if (mem_entry->buf == addr) {
182 			QEDE_LIST_REMOVE(&mem_entry->mem_entry,
183 			    &qede->mem_list.mem_list_head);
184 			kmem_free(addr, mem_entry->size);
185 			kmem_free(mem_entry, sizeof (qede_mem_list_entry_t));
186 			break;
187 		}
188 	}
189 
190 	mutex_exit(&qede->mem_list.mem_list_lock);
191 }
192 
193 /*
194  * Alignment and addressing constraints for these blocks come from
195  * qede_gen_buf_dma_attr.
196  */
196 void *
197 qede_osal_dma_alloc_coherent(struct ecore_dev *edev, dma_addr_t *paddr,
198     size_t size)
199 {
200 	qede_t *qede = (qede_t *)edev;
201 	qede_phys_mem_entry_t *new_entry;
202 	ddi_dma_handle_t *dma_handle;
203 	ddi_acc_handle_t *dma_acc_handle;
204 	ddi_dma_cookie_t cookie;
205 	int ret;
206 	caddr_t pbuf;
207 	unsigned int count;
208 
209 	memset(&cookie, 0, sizeof (cookie));
210 
211 	if ((new_entry =
212 	    kmem_zalloc(sizeof (qede_phys_mem_entry_t), KM_NOSLEEP)) == NULL) {
213 		qede_print_err("%s(%d): Failed to alloc new list entry",
214 		    __func__, qede->instance);
215 		return (NULL);
216 	}
217 
218 	dma_handle = &new_entry->dma_handle;
219 	dma_acc_handle = &new_entry->dma_acc_handle;
220 
221 	if ((ret =
222 	    ddi_dma_alloc_handle(qede->dip, &qede_gen_buf_dma_attr,
223 	    DDI_DMA_DONTWAIT,
224 	    NULL, dma_handle)) != DDI_SUCCESS) {
225 		qede_print_err("%s(%d): Failed to alloc dma handle",
226 		    __func__, qede->instance);
227 		qede_stacktrace(qede);
228 		goto free;
229 	}
230 
231 	if ((ret = ddi_dma_mem_alloc(*dma_handle, size, &qede_desc_acc_attr,
232 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &pbuf, &size,
233 	    dma_acc_handle)) != DDI_SUCCESS) {
234 		qede_print_err("%s(%d): Failed to alloc dma mem %lu bytes",
235 		    __func__, qede->instance, size);
236 		qede_stacktrace(qede);
237 		goto free_hdl;
238 	}
239 
240 	if ((ret = ddi_dma_addr_bind_handle(*dma_handle, NULL, pbuf, size,
241 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
242 	    DDI_DMA_DONTWAIT, NULL, &cookie, &count)) != DDI_DMA_MAPPED) {
243 		qede_print("!%s(%d): failed to bind dma addr to handle,"
244 		   " ret %d",
245 		    __func__, qede->instance, ret);
246 		goto free_dma_mem;
247 	}
248 
249 	if (count != 1) {
250 		qede_print("%s(%d): ncookies = %d for dma buffer, discarding",
251 		    __func__, qede->instance, count);
252 		(void) ddi_dma_unbind_handle(*dma_handle);
253 		goto free_dma_mem;
254 	}
255 
256 	new_entry->size = size;
257 	new_entry->virt_addr = pbuf;
258 
259 	new_entry->paddr = (void *)cookie.dmac_laddress;
260 
261 	*paddr = (dma_addr_t)new_entry->paddr;
262 
263 	mutex_enter(&qede->phys_mem_list.lock);
264 	QEDE_LIST_ADD(&new_entry->list_entry, &qede->phys_mem_list.head);
265 	mutex_exit(&qede->phys_mem_list.lock);
266 
267 	return (new_entry->virt_addr);
268 
269 free_dma_mem:
270 	ddi_dma_mem_free(dma_acc_handle);
271 free_hdl:
272 	ddi_dma_free_handle(dma_handle);
273 free:
274 	kmem_free(new_entry, sizeof (qede_phys_mem_entry_t));
275 	return (NULL);
276 }
277 
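/*
 * Release a coherent DMA block by virtual address: unbind the handle, free
 * the DMA memory and handle, and drop the tracking entry.
 */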
278 void
279 qede_osal_dma_free_coherent(struct ecore_dev *edev, void *vaddr,
280     dma_addr_t paddr, size_t size)
281 {
282 	qede_t *qede = (qede_t *)edev;
283 	qede_phys_mem_entry_t *entry;
284 
285 	mutex_enter(&qede->phys_mem_list.lock);
286 
287 	/* LINTED E_BAD_PTR_CAST_ALIGN */
288 	QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
289 	    qede_phys_mem_entry_t, list_entry) {
290 		if (entry->virt_addr == vaddr) {
291 			QEDE_LIST_REMOVE(&entry->list_entry,
292 			    &qede->phys_mem_list.head);
293 			ddi_dma_unbind_handle(entry->dma_handle);
294 			ddi_dma_mem_free(&entry->dma_acc_handle);
295 			ddi_dma_free_handle(&entry->dma_handle);
296 			kmem_free(entry, sizeof (qede_phys_mem_entry_t));
297 			break;
298 		}
299 	}
300 
301 	mutex_exit(&qede->phys_mem_list.lock);
302 }
303 
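/*
 * Fill in a qede_link_cfg from the current MCP link state, parameters and
 * capabilities: link status, speed, duplex, autoneg, pause settings and the
 * speeds supported locally, advertised, and advertised by the link partner.
 */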
304 void
305 qede_get_link_info(struct ecore_hwfn *hwfn, struct qede_link_cfg *lnkCfg)
306 {
307 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
308 	qede_t *qede = (qede_t *)(void *)edev;
309 	struct ecore_mcp_link_state lnk_state;
310 	struct ecore_mcp_link_params lnk_params;
311 	struct ecore_mcp_link_capabilities lnk_caps;
312 
313 	qede_update_media_info(edev, lnkCfg);
314 
315 	memcpy(&lnk_state, ecore_mcp_get_link_state(hwfn),
316 	    sizeof (lnk_state));
317 	memcpy(&lnk_params, ecore_mcp_get_link_params(hwfn),
318 	    sizeof (lnk_params));
319 	memcpy(&lnk_caps, ecore_mcp_get_link_capabilities(hwfn),
320 	    sizeof (lnk_caps));
321 
322 	if (lnk_state.link_up) {
323 		lnkCfg->link_up = B_TRUE;
324 		lnkCfg->speed = lnk_state.speed;
325 		lnkCfg->duplex = DUPLEX_FULL;
326 	}
327 
328 	if (lnk_params.speed.autoneg) {
329 		lnkCfg->supp_capab.autoneg = B_TRUE;
330 		lnkCfg->adv_capab.autoneg = B_TRUE;
331 	}
332 	if (lnk_params.speed.autoneg ||
333 	    (lnk_params.pause.forced_rx && lnk_params.pause.forced_tx)) {
334 		lnkCfg->supp_capab.asym_pause = B_TRUE;
335 		lnkCfg->adv_capab.asym_pause = B_TRUE;
336 	}
337 	if (lnk_params.speed.autoneg ||
338 	    lnk_params.pause.forced_rx || lnk_params.pause.forced_tx) {
339 		lnkCfg->supp_capab.pause = B_TRUE;
340 		lnkCfg->adv_capab.pause = B_TRUE;
341 	}
342 
343 	if (lnk_params.speed.advertised_speeds &
344 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
345 		lnkCfg->adv_capab.param_10000fdx = B_TRUE;
346 	}
347 	if (lnk_params.speed.advertised_speeds &
348 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
349 		lnkCfg->adv_capab.param_25000fdx = B_TRUE;
350 	}
351 	if (lnk_params.speed.advertised_speeds &
352 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
353 		lnkCfg->adv_capab.param_40000fdx = B_TRUE;
354 	}
355 	if (lnk_params.speed.advertised_speeds &
356 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
357 		lnkCfg->adv_capab.param_50000fdx = B_TRUE;
358 	}
359 	if (lnk_params.speed.advertised_speeds &
360 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
361 		lnkCfg->adv_capab.param_100000fdx = B_TRUE;
362 	}
363 	if (lnk_params.speed.advertised_speeds &
364 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
365 		lnkCfg->adv_capab.param_1000fdx = B_TRUE;
366 		lnkCfg->adv_capab.param_1000hdx = B_TRUE;
367 	}
368 
369 	lnkCfg->autoneg = lnk_params.speed.autoneg;
370 
371 	if (lnk_caps.speed_capabilities &
372 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
373 		lnkCfg->supp_capab.param_10000fdx = B_TRUE;
374 	}
375 	if (lnk_caps.speed_capabilities &
376 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
377 		lnkCfg->supp_capab.param_25000fdx = B_TRUE;
378 	}
379 	if (lnk_caps.speed_capabilities &
380 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
381 		lnkCfg->supp_capab.param_40000fdx = B_TRUE;
382 	}
383 	if (lnk_caps.speed_capabilities &
384 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
385 		lnkCfg->supp_capab.param_50000fdx = B_TRUE;
386 	}
387 	if (lnk_caps.speed_capabilities &
388 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
389 		lnkCfg->supp_capab.param_100000fdx = B_TRUE;
390 	}
391 	if (lnk_caps.speed_capabilities &
392 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
393 		lnkCfg->supp_capab.param_1000fdx = B_TRUE;
394 		lnkCfg->supp_capab.param_1000hdx = B_TRUE;
395 	}
396 
397 	if (lnk_params.pause.autoneg) {
398 		lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_AUTONEG_ENABLE;
399 	}
400 	if (lnk_params.pause.forced_rx) {
401 		lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_RX_ENABLE;
402 	}
403 	if (lnk_params.pause.forced_tx) {
404 		lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_TX_ENABLE;
405 	}
406 
407 
408 	if (lnk_state.partner_adv_speed &
409 	    ECORE_LINK_PARTNER_SPEED_1G_HD) {
410 		lnkCfg->rem_capab.param_1000hdx = B_TRUE;
411 	}
412 	if (lnk_state.partner_adv_speed &
413 	    ECORE_LINK_PARTNER_SPEED_1G_FD) {
414 		lnkCfg->rem_capab.param_1000fdx = B_TRUE;
415 	}
416 	if (lnk_state.partner_adv_speed &
417 	    ECORE_LINK_PARTNER_SPEED_10G) {
418 		lnkCfg->rem_capab.param_10000fdx = B_TRUE;
419 	}
420 	if (lnk_state.partner_adv_speed &
421 	    ECORE_LINK_PARTNER_SPEED_40G) {
422 		lnkCfg->rem_capab.param_40000fdx = B_TRUE;
423 	}
424 	if (lnk_state.partner_adv_speed &
425 	    ECORE_LINK_PARTNER_SPEED_50G) {
426 		lnkCfg->rem_capab.param_50000fdx = B_TRUE;
427 	}
428 	if (lnk_state.partner_adv_speed &
429 	    ECORE_LINK_PARTNER_SPEED_100G) {
430 		lnkCfg->rem_capab.param_100000fdx = B_TRUE;
431 	}
432 
433 	if (lnk_state.an_complete) {
434 		lnkCfg->rem_capab.autoneg = B_TRUE;
435 	}
436 
437 	if (lnk_state.partner_adv_pause) {
438 		lnkCfg->rem_capab.pause = B_TRUE;
439 	}
440 	if (lnk_state.partner_adv_pause ==
441 	    ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
442 	    lnk_state.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
443 		lnkCfg->rem_capab.asym_pause = B_TRUE;
444 	}
445 }
446 
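/*
 * Link-change hook invoked from ecore; refreshes the cached link properties
 * and reports the new state to the MAC layer via mac_link_update().
 */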
447 void
448 qede_osal_link_update(struct ecore_hwfn *hwfn)
449 {
450 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
451 	qede_t *qede = (qede_t *)(void *)edev;
452 	struct qede_link_cfg link_cfg;
453 
454 	memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
455 	qede_get_link_info(hwfn, &link_cfg);
456 
457 	if (link_cfg.duplex == DUPLEX_FULL) {
458 		qede->props.link_duplex = DUPLEX_FULL;
459 	} else {
460 		qede->props.link_duplex = DUPLEX_HALF;
461 	}
462 
463 	if (!link_cfg.link_up) {
464 		qede_print("!%s(%d): Link marked down",
465 		    __func__, qede->instance);
466 		qede->params.link_state = 0;
467 		qede->props.link_duplex = B_FALSE;
468 		qede->props.link_speed = 0;
469 		qede->props.tx_pause = B_FALSE;
470 		qede->props.rx_pause = B_FALSE;
471 		qede->props.uptime = 0;
472 		mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
473 	} else if (link_cfg.link_up) {
474 		qede_print("!%s(%d): Link marked up",
475 		    __func__, qede->instance);
476 		qede->params.link_state = 1;
477 		qede->props.link_speed = link_cfg.speed;
478 		qede->props.link_duplex = link_cfg.duplex;
479 		qede->props.tx_pause = (link_cfg.pause_cfg &
480 		    QEDE_LINK_PAUSE_TX_ENABLE) ? B_TRUE : B_FALSE;
481 		qede->props.rx_pause = (link_cfg.pause_cfg &
482 		    QEDE_LINK_PAUSE_RX_ENABLE) ? B_TRUE : B_FALSE;
483 		qede->props.uptime = ddi_get_time();
484 		mac_link_update(qede->mac_handle, LINK_STATE_UP);
485 	}
486 }
487 
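/*
 * log2_align() rounds n up to the next power of two (0 maps to 0);
 * LOG2() returns the integer part of log2(v).
 */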
488 unsigned long
489 log2_align(unsigned long n)
490 {
491 	unsigned long ret = n ? 1 : 0;
492 	unsigned long _n  = n >> 1;
493 
494 	while (_n) {
495 		_n >>= 1;
496 		ret <<= 1;
497 	}
498 
499 	if (ret < n) {
500 		ret <<= 1;
501 	}
502 
503 	return (ret);
504 }
505 
506 u32
507 LOG2(u32 v)
508 {
509 	u32 r = 0;
510 	while (v >>= 1) {
511 		r++;
512 	}
513 	return (r);
514 }
515 
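/*
 * Stub: no PCIe extended capability search is performed; an offset of 0
 * is always returned.
 */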
516 int
517 /* LINTED E_FUNC_ARG_UNUSED */
518 qede_osal_pci_find_ext_capab(struct ecore_dev *edev, u16 pcie_id)
519 {
520 	int offset = 0;
521 
522 	return (offset);
523 }
524 
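/*
 * Register access helpers for ecore: BAR0 (regs_handle) for device
 * registers, BAR2 (doorbell_handle) for doorbells, and pci_cfg_handle for
 * PCI config space accesses.
 */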
525 void
526 qede_osal_pci_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
527 {
528 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
529 	qede_t *qede = (qede_t *)(void *)edev;
530 	u64 addr = qede->pci_bar0_base;
531 
532 	addr += offset;
533 
534 	ddi_put32(qede->regs_handle, (u32 *)addr, val);
535 }
536 
537 void
538 qede_osal_pci_write16(struct ecore_hwfn *hwfn, u32 offset, u16 val)
539 {
540 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
541 	qede_t *qede = (qede_t *)(void *)edev;
542 	u64 addr = qede->pci_bar0_base;
543 
544 	addr += offset;
545 
546 	ddi_put16(qede->regs_handle, (u16 *)addr, val);
547 }
548 
549 u32
550 qede_osal_pci_read32(struct ecore_hwfn *hwfn, u32 offset)
551 {
552 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
553 	qede_t *qede = (qede_t *)(void *)edev;
554 	u32 val = 0;
555 	u64 addr = qede->pci_bar0_base;
556 
557 	addr += offset;
558 
559 	val = ddi_get32(qede->regs_handle, (u32 *)addr);
560 
561 	return (val);
562 }
563 
564 void
565 qede_osal_pci_bar2_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
566 {
567 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
568 	qede_t *qede = (qede_t *)(void *)edev;
569 	u64 addr = qede->pci_bar2_base;
570 
571 	addr += offset;
572 	ddi_put32(qede->doorbell_handle, (u32 *)addr, val);
573 }
574 
575 u32
576 qede_osal_direct_reg_read32(struct ecore_hwfn *hwfn, void *addr)
577 {
578 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
579 	qede_t *qede = (qede_t *)(void *)edev;
580 
581 	return (ddi_get32(qede->regs_handle, (u32 *)addr));
582 }
583 
584 void
585 qede_osal_direct_reg_write32(struct ecore_hwfn *hwfn, void *addr, u32 value)
586 {
587 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
588 	qede_t *qede = (qede_t *)(void *)edev;
589 
590 	ddi_put32(qede->regs_handle, (u32 *)addr, value);
591 }
592 
593 u32 *
594 qede_osal_reg_addr(struct ecore_hwfn *hwfn, u32 addr)
595 {
596 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
597 	qede_t *qede = (qede_t *)(void *)edev;
598 
599 	return ((u32 *)(qede->pci_bar0_base + addr));
600 }
601 
602 void
603 qede_osal_pci_read_config_byte(struct ecore_dev *edev, u32 addr, u8 *val)
604 {
605 
606 	qede_t *qede = (qede_t *)edev;
607 
608 	*val = pci_config_get8(qede->pci_cfg_handle, (off_t)addr);
609 }
610 
611 void
612 qede_osal_pci_read_config_word(struct ecore_dev *edev, u32 addr, u16 *val)
613 {
614 	qede_t *qede = (qede_t *)edev;
615 
616 	*val = pci_config_get16(qede->pci_cfg_handle, (off_t)addr);
617 }
618 
619 void
620 qede_osal_pci_read_config_dword(struct ecore_dev *edev, u32 addr, u32 *val)
621 {
622 	qede_t *qede = (qede_t *)edev;
623 
624 	*val = pci_config_get32(qede->pci_cfg_handle, (off_t)addr);
625 
626 }
627 
628 void
629 qede_print(char *format, ...)
630 {
631 	va_list ap;
632 
633 	va_start(ap, format);
634 	vcmn_err(CE_NOTE, format, ap);
635 	va_end(ap);
636 }
637 
638 void
639 qede_print_err(char *format, ...)
640 {
641 	va_list ap;
642 
643 	va_start(ap, format);
644 	vcmn_err(CE_WARN, format, ap);
645 	va_end(ap);
646 }
647 
648 /*
649  * Check if any mem/dma entries are left behind
650  * after unloading the ecore. If any are found,
651  * make sure they are freed.
652  */
653 u32
654 qede_osal_cleanup(qede_t *qede)
655 {
656 	qede_mem_list_entry_t *entry = NULL;
657 	qede_mem_list_entry_t *temp = NULL;
658 	qede_phys_mem_entry_t *entry_phys;
659 	qede_phys_mem_entry_t *temp_phys;
660 
661 	/*
662 	 * Check for misplaced mem. blocks(if any)
663 	 */
664 	mutex_enter(&qede->mem_list.mem_list_lock);
665 
666 	if (!QEDE_LIST_EMPTY(&qede->mem_list.mem_list_head)) {
667 		/*
668 		 * Something went wrong either in ecore
669 		 * or the osal mem management routines
670 		 * and the mem entry was not freed
671 		 */
672 		qede_print_err("!%s(%d): Mem entries left behind",
673 		    __func__, qede->instance);
674 
675 		QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry,
676 		    temp,
677 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
678 		    &qede->mem_list.mem_list_head,
679 		    mem_entry,
680 		    qede_mem_list_entry_t) {
681 			qede_print("!%s(%d): Cleaning-up entry %p",
682 			    __func__, qede->instance, entry);
683 			QEDE_LIST_REMOVE(&entry->mem_entry,
684 			    &qede->mem_list.mem_list_head);
685 			if (entry->buf) {
686 				kmem_free(entry->buf, entry->size);
687 				kmem_free(entry,
688 				    sizeof (qede_mem_list_entry_t));
689 			}
690 		}
691 	}
692 
693 	mutex_exit(&qede->mem_list.mem_list_lock);
694 
695 	/*
696 	 * Check for misplaced dma blocks (if any)
697 	 */
698 	mutex_enter(&qede->phys_mem_list.lock);
699 
700 	if (!QEDE_LIST_EMPTY(&qede->phys_mem_list.head)) {
701 		qede_print("!%s(%d): Dma entries left behind",
702 		    __func__, qede->instance);
703 
704 		QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry_phys,
705 		    temp_phys,
706 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
707 		    &qede->phys_mem_list.head,
708 		    list_entry,
709 		    qede_phys_mem_entry_t) {
710 			qede_print("!%s(%d): Cleaning-up entry %p",
711 			    __func__, qede->instance, entry_phys);
712 			QEDE_LIST_REMOVE(&entry_phys->list_entry,
713 			    &qede->phys_mem_list.head);
714 
715 			if (entry_phys->virt_addr) {
716 				ddi_dma_unbind_handle(entry_phys->dma_handle);
717 				ddi_dma_mem_free(&entry_phys->dma_acc_handle);
718 				ddi_dma_free_handle(&entry_phys->dma_handle);
719 				kmem_free(entry_phys,
720 				    sizeof (qede_phys_mem_entry_t));
721 			}
722 		}
723 	}
724 
725 	mutex_exit(&qede->phys_mem_list.lock);
726 
727 	return (0);
728 }
729 
730 
731 void
732 qede_osal_recovery_handler(struct ecore_hwfn *hwfn)
733 {
734 	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
735 	qede_t *qede = (qede_t *)(void *)edev;
736 
737 	cmn_err(CE_WARN, "!%s(%d): Not implemented",
738 	    __func__, qede->instance);
739 
740 }
741 
742 
743 enum _ecore_status_t
744 qede_osal_iov_vf_acquire(struct ecore_hwfn *p_hwfn, int vf_id)
745 {
746 	return (ECORE_SUCCESS);
747 }
748 
749 
750 void
751 qede_osal_pci_write_config_word(struct ecore_dev *dev, u32 addr, u16 pcie_id)
752 {
753 	qede_t *qede = (qede_t *)dev;
754 	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
755 
756 	pci_config_put16(pci_cfg_handle, (off_t)addr, pcie_id);
757 }
758 
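/*
 * qede_osal_valloc()/qede_osal_vfree() and most of the remaining OSAL
 * entry points below are stubs that exist only to satisfy the ecore
 * interface on this platform.
 */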
759 void *
760 qede_osal_valloc(struct ecore_dev *dev, u32 size)
761 {
762 	void *ptr = 0;
763 
764 	return (ptr);
765 }
766 
767 void
768 qede_osal_vfree(struct ecore_dev *dev, void *mem)
769 {
770 }
771 
772 int
773 /* LINTED E_FUNC_ARG_UNUSED */
774 qede_osal_pci_find_capability(struct ecore_dev *dev, u16 pcie_id)
775 {
776 	return (1);
777 }
778 
779 void
780 qede_osal_poll_mode_dpc(struct ecore_hwfn *p_hwfn)
781 {
782 }
783 
784 int
785 /* LINTED E_FUNC_ARG_UNUSED */
786 qede_osal_bitmap_weight(unsigned long *bitmap, uint32_t nbits)
787 {
788 	uint32_t count = 0;
789 	return (count);
790 }
791 
792 void
793 /* LINTED E_FUNC_ARG_UNUSED */
794 qede_osal_mfw_tlv_req(struct ecore_hwfn *p_hwfn)
795 {
796 }
797 
798 u32
799 /* LINTED E_FUNC_ARG_UNUSED */
800 qede_osal_crc32(u32 crc, u8 *buf, u64 length)
801 {
802 	return (1);
803 }
804 
805 void
806 /* LINTED E_FUNC_ARG_UNUSED */
807 qede_osal_hw_info_change(struct ecore_hwfn *p_hwfn, int change)
808 {
809 }
810 
811 void
812 /* LINTED E_FUNC_ARG_UNUSED */
813 OSAL_CRC8_POPULATE(u8 *cdu_crc8_table, u8 polynomial)
814 {
815 }
816 u8
817 /* LINTED E_FUNC_ARG_UNUSED */
818 OSAL_CRC8(u8 *cdu_crc8_table, u8 *data_to_crc, int data_to_crc_len,
819     u8 init_value)
820 {
821 	return (0);
822 }
823 void
824 /* LINTED E_FUNC_ARG_UNUSED */
825 OSAL_DPC_SYNC(struct ecore_hwfn *p_hwfn)
826 {
827 	/* Do nothing right now. */
828 }
829