/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_hw.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"

#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif

#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET	(OSAL_CPU_TO_LE32(-1))

struct ecore_ptt {
	osal_list_entry_t	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
	u8			hwfn_id;
};

struct ecore_ptt_pool {
	osal_list_t		free_list;
	osal_spinlock_t		lock; /* ptt synchronized access */
	struct ecore_ptt	ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
						   GFP_KERNEL,
						   sizeof(*p_pool));
	int i;

	if (!p_pool)
		return ECORE_NOMEM;

	OSAL_LIST_INIT(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

		/* There are special PTT entries that are reserved by design.
		 * The rest are added to the list for general usage.
		 */
		if (i >= RESERVED_PTT_MAX)
			OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
					    &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
	OSAL_SPIN_LOCK_INIT(&p_pool->lock);

	return ECORE_SUCCESS;
}

void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
	}
}

void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->p_ptt_pool)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = OSAL_NULL;
}

struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);

		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
						      struct ecore_ptt, list_entry);
			OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
					       &p_hwfn->p_ptt_pool->free_list);

			OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
		OSAL_MSLEEP(1);
	}

	DP_NOTICE(p_hwfn, true, "PTT acquire timeout - failed to allocate PTT\n");
	return OSAL_NULL;
}
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	/* This PTT should not be set to pretend if it is being released */
	/* TODO - add some pretend sanity checks, to make sure pretend
	 * isn't set on this ptt
	 */

	OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
	OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
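
/* Illustrative sketch only (not compiled into the driver): the intended
 * usage pattern for the acquire/release pair above. ecore_sample_read() is
 * a hypothetical caller; GRC_REG_TRACE_FIFO_VALID_DATA is simply used as an
 * example offset.
 */
#if 0
static u32 ecore_sample_read(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	u32 val;

	/* May sleep up to ECORE_BAR_ACQUIRE_TIMEOUT ms for a free PTT */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == OSAL_NULL)
		return 0;

	val = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);

	/* Always release the PTT, or other acquirers will time out */
	ecore_ptt_release(p_hwfn, p_ptt);

	return val;
}
#endif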

u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}

static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in the admin window */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, offset),
	       OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}

static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u32 hw_addr)
{
	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn, true,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
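
/* Worked example of the translation above, for illustration only: assume
 * the window of PTT idx 5 currently maps hw_addr 0x50000. A request for
 * hw_addr 0x50010 falls inside the window, so the returned address is
 * ecore_ptt_get_bar_addr(p_ptt) + 0x10. A request below 0x50000, or at
 * 0x50000 + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE and beyond, re-programs
 * the window to start at the new address and returns offset 0.
 */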

struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
					 enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn, true,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return OSAL_NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	bool is_empty = true;
	u32 bar_addr;

	if (!p_hwfn->p_dev->chk_reg_fifo)
		goto out;

	/* ecore_rd() cannot be used here since it calls this function */
	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
	is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

out:
	return is_empty;
}

void ecore_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr,
	      u32 val)
{
	bool prev_fifo_err;
	u32 bar_addr;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_wr(0x%x, 0x%x)\n",
		  hw_addr, val);
}

u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
	bool prev_fifo_err;
	u32 bar_addr, val;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
		  hw_addr);

	return val;
}
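
/* Illustrative sketch only: a typical read-modify-write built on the
 * ecore_rd()/ecore_wr() pair above. SAMPLE_REG and SAMPLE_BIT are
 * hypothetical names; window re-programming and the reg_fifo sanity checks
 * happen inside the accessors, so the caller only deals in GRC offsets.
 */
#if 0
static void ecore_sample_set_bit(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	u32 val;

	val = ecore_rd(p_hwfn, p_ptt, SAMPLE_REG);
	val |= SAMPLE_BIT;
	ecore_wr(p_hwfn, p_ptt, SAMPLE_REG, val);
}
#endif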

static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    void *addr,
			    u32 hw_addr,
			    osal_size_t n,
			    bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	osal_size_t quota, done = 0;
	u32 OSAL_IOMEM *reg_addr;

	while (done < n) {
		quota = OSAL_MIN_T(osal_size_t, n - done,
				   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->p_dev)) {
			ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = ecore_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(p_hwfn,
							     reg_addr++);

		done += quota;
	}
}

void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       void *dest, u32 hw_addr, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u32 hw_addr, void *src, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
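
/* Illustrative sketch only: copying a host buffer into device memory via
 * the BAR window. ecore_memcpy_hw() walks the buffer in window-sized
 * chunks, so 'n' may exceed PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE.
 * SAMPLE_HW_BUF_ADDR is a hypothetical device address.
 */
#if 0
static void ecore_sample_upload(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	u32 buf[16] = { 0 };

	ecore_memcpy_to(p_hwfn, p_ptt, SAMPLE_HW_BUF_ADDR, buf, sizeof(buf));
}
#endif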

void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
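
/* Illustrative sketch only: combining ecore_vfid_to_concrete() with
 * ecore_fid_pretend() so that subsequent accesses through this PTT are
 * issued on behalf of a VF. Pretending back to the PF's own relative id
 * restores the PF identity; this mirrors how the IOV code uses these
 * helpers, but the function itself is hypothetical.
 */
#if 0
static void ecore_sample_vf_access(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u8 vfid)
{
	ecore_fid_pretend(p_hwfn, p_ptt,
			  (u16)ecore_vfid_to_concrete(p_hwfn, vfid));

	/* ... GRC accesses through p_ptt now pretend to be the VF ... */

	ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
#endif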

#if 0
/* Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */
#define HW_LOCK_MAX_RETRIES 1000
enum _ecore_status_t ecore_hw_lock(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u8 resource,
				   bool block)
{
	u32 cnt, lock_status, hw_lock_cntr_reg;
	enum _ecore_status_t ecore_status;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is not already taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (lock_status & resource) {
		DP_NOTICE(p_hwfn, true,
			  "Resource already locked: lock_status=0x%x resource=0x%x\n",
			  lock_status, resource);

		return ECORE_BUSY;
	}

	/* Register for the lock */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg + sizeof(u32), resource);

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < HW_LOCK_MAX_RETRIES; cnt++) {
		lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

		if (lock_status & resource)
			return ECORE_SUCCESS;

		if (!block) {
			ecore_status = ECORE_BUSY;
			break;
		}

		OSAL_MSLEEP(5);
	}

	if (cnt == HW_LOCK_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, true, "Lock timeout resource=0x%x\n",
			  resource);
		ecore_status = ECORE_TIMEOUT;
	}

	/* Clear the pending request */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);

	return ecore_status;
}

enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u8 resource)
{
	u32 lock_status, hw_lock_cntr_reg;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is currently taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (!(lock_status & resource)) {
		DP_NOTICE(p_hwfn, true,
			  "resource 0x%x was not taken (lock status 0x%x)\n",
			  resource, lock_status);

		return ECORE_NODEV;
	}

	/* clear lock for resource */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
	return ECORE_SUCCESS;
}
#endif /* HW locks logic */

/* DMAE */
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
			      const u8 is_src_type_grc,
			      const u8 is_dst_type_grc,
			      struct ecore_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		  DMAE_CMD_SRC_PF_ID_SHIFT;

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		  DMAE_CMD_DST_PF_ID_SHIFT;

	/* DMAE_E4_TODO: need to check which value to specify here. */
	/* opcode |= (!b_complete_to_host) << DMAE_CMD_C_DST_SHIFT; */

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;

	if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

	/* Swapping mode 3 - big endian. There should be an ifdef'ed define
	 * for this in the HSI somewhere, but currently there is not.
	 */
	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

	opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;

	/* reset source address in next go */
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	/* reset dest address in next go */
	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK <<
		  DMAE_CMD_DST_ADDR_RESET_SHIFT;

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
	} else {
		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
			     DMAE_CMD_SRC_VF_ID_SHIFT);
	}
	if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK <<
			    DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}

static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) !=
			  31 * 4);

	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}

static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	/* verify address is not OSAL_NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn, true,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  OSAL_LE32_TO_CPU(p_command->opcode),
			  OSAL_LE16_TO_CPU(p_command->opcode_b),
			  OSAL_LE16_TO_CPU(p_command->length_dw),
			  OSAL_LE32_TO_CPU(p_command->src_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->src_addr_lo),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   OSAL_LE32_TO_CPU(p_command->opcode),
		   OSAL_LE16_TO_CPU(p_command->opcode_b),
		   OSAL_LE16_TO_CPU(p_command->length_dw),
		   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * since the source/dest addresses are not reset.
	 * The number of commands has been increased to 16 (it was previously
	 * 14). The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers (which are read-only
	 * by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			    *(((u32 *)p_command) + i) : 0;

		ecore_wr(p_hwfn, p_ptt,
			 DMAE_REG_CMD_MEM +
			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
			 (i * sizeof(u32)), data);
	}

	ecore_wr(p_hwfn, p_ptt,
		 ecore_dmae_idx_to_go_cmd(idx_cmd),
		 DMAE_GO_VALUE);

	return ecore_status;
}

enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
	if (*p_comp == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `p_completion_word'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					 sizeof(struct dmae_cmd));
	if (*p_cmd == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct dmae_cmd'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					  sizeof(u32) * DMAE_MAX_RW_SIZE);
	if (*p_buff == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `intermediate_buffer'\n");
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return ECORE_SUCCESS;
err:
	ecore_dmae_info_free(p_hwfn);
	return ECORE_NOMEM;
}

void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_completion_word,
				       p_phys, sizeof(u32));
		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_dmae_cmd,
				       p_phys, sizeof(struct dmae_cmd));
		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_intermediate_buffer,
				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
	}

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}

static enum _ecore_status_t
ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
		      ECORE_EMUL_FACTOR :
		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
		       ECORE_FPGA_FACTOR : 1));

	wait_cnt_limit *= factor;
#endif

	/* DMAE_E4_TODO: check if we have to call any function other than
	 * BARRIER to sync the completion_word, since we are not using the
	 * volatile keyword for this.
	 */
	OSAL_BARRIER(p_hwfn->p_dev);
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
				  "Timed out waiting for operation to complete. Completion word is 0x%08x, expected 0x%08x.\n",
				  *(p_hwfn->dmae_info.p_completion_word),
				  DMAE_COMPLETION_VAL);
			ecore_status = ECORE_TIMEOUT;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		OSAL_BARRIER(p_hwfn->p_dev);
	}

	if (ecore_status == ECORE_SUCCESS)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return ecore_status;
}

static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
							     struct ecore_ptt *p_ptt,
							     u64 src_addr,
							     u64 dst_addr,
							     u8 src_type,
							     u8 dst_type,
							     u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	switch (src_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		OSAL_MEMCPY(&(p_hwfn->dmae_info.p_intermediate_buffer[0]),
			    (void *)(osal_uintptr_t)src_addr,
			    length_dw * sizeof(u32));
		break;
	default:
		return ECORE_INVAL;
	}

	switch (dst_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		break;
	default:
		return ECORE_INVAL;
	}

	cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), false);

	ecore_dmae_post_command(p_hwfn, p_ptt);

	ecore_status = ecore_dmae_operation_wait(p_hwfn);

	/* TODO - is this true? */
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), true);

	if (ecore_status != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, ECORE_MSG_HW,
			  "Wait failed. src_addr 0x%llx, dst_addr 0x%llx, size_in_dwords 0x%x, intermediate buffer 0x%llx.\n",
			  (unsigned long long)src_addr,
			  (unsigned long long)dst_addr, length_dw,
			  (unsigned long long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
		return ecore_status;
	}

	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
			    length_dw * sizeof(u32));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
						       struct ecore_ptt *p_ptt,
						       u64 src_addr, u64 dst_addr,
						       u8 src_type, u8 dst_type,
						       u32 size_in_dwords,
						       struct ecore_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u32 offset = 0;

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   (unsigned long long)src_addr, src_type,
			   (unsigned long long)dst_addr, dst_type,
			   size_in_dwords);
		/* Return success to let the flow complete successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	ecore_dmae_opcode(p_hwfn,
			  (src_type == ECORE_DMAE_ADDRESS_GRC),
			  (dst_type == ECORE_DMAE_ADDRESS_GRC),
			  p_params);

	cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
	cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
	cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

	/* Split the transaction into chunks of at most DMAE_MAX_RW_SIZE dwords */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == ECORE_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
								p_ptt,
								src_addr_split,
								dst_addr_split,
								src_type,
								dst_type,
								length_cur);
		if (ecore_status != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "ecore_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  ecore_status, (unsigned long long)src_addr,
				  (unsigned long long)dst_addr, length_cur);

			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
			break;
		}
	}

	return ecore_status;
}

enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u64 source_addr,
					 u32 grc_addr,
					 u32 size_in_dwords,
					 u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					grc_addr_in_dw,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					ECORE_DMAE_ADDRESS_GRC,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 grc_addr,
					 dma_addr_t dest_addr,
					 u32 size_in_dwords,
					 u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
					dest_addr, ECORE_DMAE_ADDRESS_GRC,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  dma_addr_t source_addr,
					  dma_addr_t dest_addr,
					  u32 size_in_dwords,
					  struct ecore_dmae_params *p_params)
{
	enum _ecore_status_t rc;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					dest_addr,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					size_in_dwords,
					p_params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}
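
/* Illustrative sketch only: a round trip through the DMAE wrappers above.
 * 'grc_dst' is a hypothetical dword-aligned GRC byte address. Sizes are in
 * dwords; transfers larger than DMAE_MAX_RW_SIZE are split internally by
 * ecore_dmae_execute_command().
 */
#if 0
static enum _ecore_status_t
ecore_sample_dmae_roundtrip(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			    u32 grc_dst)
{
	u32 buf[8] = { 0 };
	enum _ecore_status_t rc;

	/* Host virtual buffer -> GRC; flags = 0 requests default behavior */
	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)buf,
				 grc_dst, 8, 0);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* GRC -> host buffer (treated as a host virtual address) */
	return ecore_dmae_grc2host(p_hwfn, p_ptt, grc_dst,
				   (dma_addr_t)(osal_uintptr_t)buf, 8, 0);
}
#endif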

void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
			 enum ecore_hw_err_type err_type)
{
	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}
1053