/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_hw.c
 */
#include <sys/cdefs.h>
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif

#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif

#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET	(OSAL_CPU_TO_LE32(-1))

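/*
 * Editorial note (illustrative, not from the original sources): a PTT
 * ("PF translation table") entry describes one sliding window into the
 * device's internal (GRC) address space.  Programming the entry's
 * pxp.offset points the window at an internal address range, after which
 * reads and writes to the window's BAR addresses reach that range.  The
 * pool below hands these windows out to concurrent flows.
 */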
struct ecore_ptt {
	osal_list_entry_t	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
	u8			hwfn_id;
};

struct ecore_ptt_pool {
	osal_list_t		free_list;
	osal_spinlock_t		lock; /* ptt synchronized access */
	struct ecore_ptt	ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

static void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = OSAL_NULL;
}

enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
						   GFP_KERNEL,
						   sizeof(*p_pool));
	int i;

	if (!p_pool)
		return ECORE_NOMEM;

	OSAL_LIST_INIT(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

		/* There are special PTT entries that are reserved by design.
		 * The rest are added to the list for general usage.
		 */
		if (i >= RESERVED_PTT_MAX)
			OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
					    &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
		__ecore_ptt_pool_free(p_hwfn);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_pool->lock);
	return ECORE_SUCCESS;
}

void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
	}
}

void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (p_hwfn->p_ptt_pool)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
	__ecore_ptt_pool_free(p_hwfn);
}

struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);

		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
						      struct ecore_ptt, list_entry);
			OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
					       &p_hwfn->p_ptt_pool->free_list);

			OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
		OSAL_MSLEEP(1);
	}

	DP_NOTICE(p_hwfn, true, "PTT acquire timeout - failed to allocate PTT\n");
	return OSAL_NULL;
}

void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	/* This PTT should not be set to pretend if it is being released */
	/* TODO - add some pretend sanity checks, to make sure pretend
	 * isn't set on this ptt
	 */

	OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
	OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
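
/*
 * Editorial note - typical acquire/use/release pattern (an illustrative
 * sketch only, not code from the original file):
 *
 *	struct ecore_ptt *p_ptt;
 *	u32 val;
 *
 *	p_ptt = ecore_ptt_acquire(p_hwfn);
 *	if (p_ptt == OSAL_NULL)
 *		return ECORE_AGAIN;	// pool exhausted for ~1 second
 *	val = ecore_rd(p_hwfn, p_ptt, reg_addr);
 *	ecore_wr(p_hwfn, p_ptt, reg_addr, val | some_bit);
 *	ecore_ptt_release(p_hwfn, p_ptt);
 *
 * reg_addr and some_bit are placeholders; ECORE_AGAIN is one plausible
 * way for a caller to handle acquire failure.
 */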

static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}

static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update the PTT entry in the admin window */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, offset),
	       OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}

static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u32 hw_addr)
{
	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn, true,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
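
/*
 * Editorial note - worked example of the translation above (illustrative
 * numbers only): suppose the window of PTT idx 3 currently maps internal
 * address 0x10000.  An ecore_set_ptt() call for hw_addr 0x10040 falls
 * inside the window, so it returns the window's BAR base plus 0x40.  A
 * call for hw_addr 0x20000 misses the window, so the window is first
 * slid to 0x20000 via ecore_ptt_set_win() and the BAR base is returned
 * with offset 0.
 */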

struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
					 enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn, true,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return OSAL_NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	bool is_empty = true;
	u32 bar_addr;

	if (!p_hwfn->p_dev->chk_reg_fifo)
		goto out;

	/* ecore_rd() cannot be used here since it calls this function */
	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
	is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

out:
	return is_empty;
}

void ecore_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr,
	      u32 val)
{
	bool prev_fifo_err;
	u32 bar_addr;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_wr(0x%x, 0x%x)\n",
		  hw_addr, val);
}

u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
	bool prev_fifo_err;
	u32 bar_addr, val;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
		  hw_addr);

	return val;
}

static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    void *addr,
			    u32 hw_addr,
			    osal_size_t n,
			    bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	osal_size_t quota, done = 0;
	u32 OSAL_IOMEM *reg_addr;

	while (done < n) {
		quota = OSAL_MIN_T(osal_size_t, n - done,
				   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->p_dev)) {
			ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = ecore_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(p_hwfn,
							     reg_addr++);

		done += quota;
	}
}
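
/*
 * Editorial note (illustrative): ecore_memcpy_hw() above copies in chunks
 * of at most one BAR window (PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE bytes),
 * sliding the window before each PF chunk.  VFs have no PTT windows and
 * access the address directly, hence the IS_PF() split.
 */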

void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       void *dest, u32 hw_addr, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u32 hw_addr, void *src, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
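
/*
 * Editorial note (illustrative): a "concrete" FID packs the PF id, VF id
 * and a VF-valid flag into one value, so a single register write can make
 * subsequent GRC accesses pretend to originate from that function.  For
 * example, pretending to be VF 5 of this PF could look like:
 *
 *	ecore_fid_pretend(p_hwfn, p_ptt,
 *			  (u16)ecore_vfid_to_concrete(p_hwfn, 5));
 *	... access registers as the VF ...
 *	ecore_fid_pretend(p_hwfn, p_ptt,
 *			  p_hwfn->rel_pf_id <<
 *			  PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
 *
 * The restore step mirrors what ecore_ppfid_wr()/ecore_ppfid_rd() below
 * do after their pretended access.
 */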

#if 0
/* Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */
#define HW_LOCK_MAX_RETRIES 1000
enum _ecore_status_t ecore_hw_lock(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u8 resource,
				   bool block)
{
	u32 cnt, lock_status, hw_lock_cntr_reg;
	enum _ecore_status_t ecore_status;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is not already taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (lock_status & resource) {
		DP_NOTICE(p_hwfn, true,
			  "Resource already locked: lock_status=0x%x resource=0x%x\n",
			  lock_status, resource);

		return ECORE_BUSY;
	}

	/* Register for the lock */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg + sizeof(u32), resource);

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < HW_LOCK_MAX_RETRIES; cnt++) {
		lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

		if (lock_status & resource)
			return ECORE_SUCCESS;

		if (!block) {
			ecore_status = ECORE_BUSY;
			break;
		}

		OSAL_MSLEEP(5);
	}

	if (cnt == HW_LOCK_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, true, "Lock timeout resource=0x%x\n",
			  resource);
		ecore_status = ECORE_TIMEOUT;
	}

	/* Clear the pending request */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);

	return ecore_status;
}

enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u8 resource)
{
	u32 lock_status, hw_lock_cntr_reg;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is currently taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (!(lock_status & resource)) {
		DP_NOTICE(p_hwfn, true,
			  "resource 0x%x was not taken (lock status 0x%x)\n",
			  resource, lock_status);

		return ECORE_NODEV;
	}

	/* clear lock for resource */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
	return ECORE_SUCCESS;
}
#endif /* HW locks logic */

/* DMAE */

#define ECORE_DMAE_FLAGS_IS_SET(params, flag)	\
	((params) != OSAL_NULL && ((params)->flags & ECORE_DMAE_FLAG_##flag))
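
/*
 * Editorial note (illustrative): the macro above lets callers pass
 * OSAL_NULL instead of a params struct; e.g.
 * ECORE_DMAE_FLAGS_IS_SET(p_params, VF_SRC) expands to a NULL-safe test
 * of the ECORE_DMAE_FLAG_VF_SRC bit in p_params->flags.
 */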

static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
			      const u8 is_src_type_grc,
			      const u8 is_dst_type_grc,
			      struct ecore_dmae_params *p_params)
{
	u8 src_pfid, dst_pfid, port_id;
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	src_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
		   p_params->src_pfid : p_hwfn->rel_pf_id;
	opcode |= (src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
		  DMAE_CMD_SRC_PF_ID_SHIFT;

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	dst_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
		   p_params->dst_pfid : p_hwfn->rel_pf_id;
	opcode |= (dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
		  DMAE_CMD_DST_PF_ID_SHIFT;

	/* DMAE_E4_TODO need to check which value to specify here. */
	/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT;*/

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

	/* Swapping mode 3 - big endian. Ideally there would be a define for
	 * this in the HSI; since there currently isn't, use the raw value.
	 */
	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

	port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
		  p_params->port_id : p_hwfn->port_id;
	opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;

	/* reset source address in next go */
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	/* reset dest address in next go */
	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK <<
		  DMAE_CMD_DST_ADDR_RESET_SHIFT;

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
	} else {
		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
			     DMAE_CMD_SRC_VF_ID_SHIFT);
	}
	if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK <<
			    DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}

static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) !=
			  31 * 4);

	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
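
/*
 * Editorial note (illustrative): the 'go' registers are 4 bytes apart,
 * so ecore_dmae_idx_to_go_cmd(2), for instance, yields
 * DMAE_REG_GO_C0 + 8, i.e. the third channel's GO register.
 */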

static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	/* verify address is not OSAL_NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn, true,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  OSAL_LE32_TO_CPU(p_command->opcode),
			  OSAL_LE16_TO_CPU(p_command->opcode_b),
			  OSAL_LE16_TO_CPU(p_command->length_dw),
			  OSAL_LE32_TO_CPU(p_command->src_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->src_addr_lo),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   OSAL_LE32_TO_CPU(p_command->opcode),
		   OSAL_LE16_TO_CPU(p_command->opcode_b),
		   OSAL_LE16_TO_CPU(p_command->length_dw),
		   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The number of commands has been increased to 16 (previously 14).
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers (which are read
	 * only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			    *(((u32 *)p_command) + i) : 0;

		ecore_wr(p_hwfn, p_ptt,
			 DMAE_REG_CMD_MEM +
			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
			 (i * sizeof(u32)), data);
	}

	ecore_wr(p_hwfn, p_ptt,
		 ecore_dmae_idx_to_go_cmd(idx_cmd),
		 DMAE_GO_VALUE);

	return ecore_status;
}

enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
	if (*p_comp == OSAL_NULL) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `p_completion_word'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					 sizeof(struct dmae_cmd));
	if (*p_cmd == OSAL_NULL) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct dmae_cmd'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					  sizeof(u32) * DMAE_MAX_RW_SIZE);
	if (*p_buff == OSAL_NULL) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `intermediate_buffer'\n");
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
	p_hwfn->dmae_info.b_mem_ready = true;

	return ECORE_SUCCESS;
err:
	ecore_dmae_info_free(p_hwfn);
	return ECORE_NOMEM;
}

void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
	p_hwfn->dmae_info.b_mem_ready = false;
	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_completion_word,
				       p_phys, sizeof(u32));
		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_dmae_cmd,
				       p_phys, sizeof(struct dmae_cmd));
		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_intermediate_buffer,
				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
	}
}

static enum _ecore_status_t
ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
		      ECORE_EMUL_FACTOR :
		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
		       ECORE_FPGA_FACTOR : 1));

	wait_cnt_limit *= factor;
#endif

	/* DMAE_E4_TODO : TODO check if we have to call any other function
	 * other than BARRIER to sync the completion_word since we are not
	 * using the volatile keyword for this
	 */
	OSAL_BARRIER(p_hwfn->p_dev);
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *(p_hwfn->dmae_info.p_completion_word),
				  DMAE_COMPLETION_VAL);
			ecore_status = ECORE_TIMEOUT;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		OSAL_BARRIER(p_hwfn->p_dev);
	}

	if (ecore_status == ECORE_SUCCESS)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return ecore_status;
}

static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
							     struct ecore_ptt *p_ptt,
							     u64 src_addr,
							     u64 dst_addr,
							     u8 src_type,
							     u8 dst_type,
							     u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	switch (src_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		OSAL_MEMCPY(&(p_hwfn->dmae_info.p_intermediate_buffer[0]),
			    (void *)(osal_uintptr_t)src_addr,
			    length_dw * sizeof(u32));
		break;
	default:
		return ECORE_INVAL;
	}

	switch (dst_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		break;
	default:
		return ECORE_INVAL;
	}

	cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
#ifndef __EXTRACT__LINUX__
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), false);
#endif

	ecore_dmae_post_command(p_hwfn, p_ptt);

	ecore_status = ecore_dmae_operation_wait(p_hwfn);

#ifndef __EXTRACT__LINUX__
	/* TODO - is it true ? */
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), true);
#endif

	if (ecore_status != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, ECORE_MSG_HW,
			  "Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x, intermediate buffer 0x%llx.\n",
			  (unsigned long long)src_addr, (unsigned long long)dst_addr, length_dw,
			  (unsigned long long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
		return ecore_status;
	}

	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
			    length_dw * sizeof(u32));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
						       struct ecore_ptt *p_ptt,
						       u64 src_addr, u64 dst_addr,
						       u8 src_type, u8 dst_type,
						       u32 size_in_dwords,
						       struct ecore_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u32 offset = 0;

	if (!p_hwfn->dmae_info.b_mem_ready) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   (unsigned long long)src_addr, src_type, (unsigned long long)dst_addr, dst_type,
			   size_in_dwords);
		return ECORE_NOMEM;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   (unsigned long long)src_addr, src_type, (unsigned long long)dst_addr, dst_type,
			   size_in_dwords);
		/* Return success to let the flow complete successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	if (!cmd) {
		DP_NOTICE(p_hwfn, true,
			  "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
			  (unsigned long long)src_addr, (unsigned long long)dst_addr, length_cur);
		return ECORE_INVAL;
	}

	ecore_dmae_opcode(p_hwfn,
			  (src_type == ECORE_DMAE_ADDRESS_GRC),
			  (dst_type == ECORE_DMAE_ADDRESS_GRC),
			  p_params);

	cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
	cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
	cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

	/* TODO: check that the grc_addr is valid, i.e. < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
			if (src_type == ECORE_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
								p_ptt,
								src_addr_split,
								dst_addr_split,
								src_type,
								dst_type,
								length_cur);
		if (ecore_status != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "ecore_dmae_execute_sub_operation failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  ecore_status, (unsigned long long)src_addr, (unsigned long long)dst_addr, length_cur);

			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
			break;
		}
	}

	return ecore_status;
}
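
/*
 * Editorial note - worked example of the split arithmetic above
 * (illustrative; assumes DMAE_MAX_RW_SIZE is 0x2000 dwords): for
 * size_in_dwords = 0x2100, cnt_split = 1 and length_mod = 0x100, so the
 * loop issues one full 0x2000-dword sub-operation followed by one
 * 0x100-dword remainder.  GRC addresses advance in dwords while host
 * addresses advance in bytes, hence the "offset * 4" on the non-GRC
 * side.
 */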

enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u64 source_addr,
					 u32 grc_addr,
					 u32 size_in_dwords,
					 struct ecore_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	enum _ecore_status_t rc;

	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					grc_addr_in_dw,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					ECORE_DMAE_ADDRESS_GRC,
					size_in_dwords, p_params);

	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

	return rc;
}

enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 grc_addr,
					 dma_addr_t dest_addr,
					 u32 size_in_dwords,
					 struct ecore_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	enum _ecore_status_t rc;

	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
					dest_addr, ECORE_DMAE_ADDRESS_GRC,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					size_in_dwords, p_params);

	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

	return rc;
}

enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  dma_addr_t source_addr,
					  dma_addr_t dest_addr,
					  u32 size_in_dwords,
					  struct ecore_dmae_params *p_params)
{
	enum _ecore_status_t rc;

	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					dest_addr,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					size_in_dwords,
					p_params);

	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

	return rc;
}
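
/*
 * Editorial note - typical host2grc usage (an illustrative sketch only):
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
 *				 (u64)(osal_uintptr_t)p_buf,	// host virtual
 *				 grc_dest_addr,			// byte address
 *				 buf_len / sizeof(u32),		// dwords
 *				 OSAL_NULL);			// defaults
 *
 * p_buf, grc_dest_addr and buf_len are placeholders.  Note that host2grc
 * and grc2host take the host side as a virtual address (HOST_VIRT, staged
 * through the intermediate buffer), while host2host takes DMA-able
 * physical addresses (HOST_PHYS).
 */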

void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
			 enum ecore_hw_err_type err_type)
{
	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}

enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       const char *phase)
{
	u32 size = OSAL_PAGE_SIZE / 2, val;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	dma_addr_t p_phys;
	void *p_virt;
	u32 *p_tmp;

	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
	if (!p_virt) {
		DP_NOTICE(p_hwfn, false,
			  "DMAE sanity [%s]: failed to allocate memory\n",
			  phase);
		return ECORE_NOMEM;
	}

	/* Fill the bottom half of the allocated memory with a known pattern */
	for (p_tmp = (u32 *)p_virt;
	     p_tmp < (u32 *)((u8 *)p_virt + size);
	     p_tmp++) {
		/* Save the address itself as the value */
		val = (u32)(osal_uintptr_t)p_tmp;
		*p_tmp = val;
	}

	/* Zero the top half of the allocated memory */
	OSAL_MEM_ZERO((u8 *)p_virt + size, size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
		   phase, (unsigned long long)p_phys, p_virt,
		   (unsigned long long)(p_phys + size), (u8 *)p_virt + size,
		   size);

	rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
				  size / 4 /* size_in_dwords */,
				  OSAL_NULL /* default parameters */);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
			  phase, rc);
		goto out;
	}

	/* Verify that the top half of the allocated memory has the pattern */
	for (p_tmp = (u32 *)((u8 *)p_virt + size);
	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
	     p_tmp++) {
		/* The corresponding address in the bottom half */
		val = (u32)(osal_uintptr_t)p_tmp - size;

		if (*p_tmp != val) {
			DP_NOTICE(p_hwfn, false,
				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
				  phase,
				  (unsigned long long)(p_phys + (u32)((u8 *)p_tmp - (u8 *)p_virt)),
				  p_tmp, *p_tmp, val);
			rc = ECORE_UNKNOWN_ERROR;
			goto out;
		}
	}

out:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
	return rc;
}

void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    u8 abs_ppfid, u32 hw_addr, u32 val)
{
	u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);

	ecore_fid_pretend(p_hwfn, p_ptt,
			  pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
	ecore_wr(p_hwfn, p_ptt, hw_addr, val);
	ecore_fid_pretend(p_hwfn, p_ptt,
			  p_hwfn->rel_pf_id <<
			  PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
}

u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		   u8 abs_ppfid, u32 hw_addr)
{
	u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
	u32 val;

	ecore_fid_pretend(p_hwfn, p_ptt,
			  pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
	val = ecore_rd(p_hwfn, p_ptt, hw_addr);
	ecore_fid_pretend(p_hwfn, p_ptt,
			  p_hwfn->rel_pf_id <<
			  PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	return val;
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif