/*-
 * Copyright (c) 2010-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

/* Common code requires this */
#if __FreeBSD_version < 800068
#define	memmove(d, s, l) bcopy(s, d, l)
#endif

/* FreeBSD equivalents of Solaris things */
#ifndef _NOTE
#define	_NOTE(s)
#endif

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef IS_P2ALIGNED
#define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif

#ifndef P2ROUNDUP
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#endif

#ifndef P2ALIGN
#define	P2ALIGN(_x, _a)		((_x) & -(_a))
#endif

#ifndef ISP2
#define	ISP2(x)			(((x) & ((x) - 1)) == 0)
#endif
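
/*
 * The helpers above assume that the alignment argument is a power of two.
 * For example:
 *	IS_P2ALIGNED(24, 8)	is true  (24 is a multiple of 8)
 *	P2ROUNDUP(5, 4)		== 8	 (round 5 up to a multiple of 4)
 *	P2ALIGN(13, 8)		== 8	 (round 13 down to a multiple of 8)
 *	ISP2(24)		is false (24 is not a power of two)
 */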

#if defined(__x86_64__) && __FreeBSD_version >= 1000000

#define	SFXGE_USE_BUS_SPACE_8		1

#if !defined(bus_space_read_stream_8)

#define	bus_space_read_stream_8(t, h, o)				\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)				\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#elif defined(__sparc64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetch [%0], 0"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetch [%0], 1"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
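
/*
 * Fast path for loading a single-segment DMA mapping from an mbuf.  On
 * i386/amd64 the segment address is taken directly from pmap_kextract(),
 * which assumes the mbuf data is physically contiguous; other
 * architectures fall back to bus_dmamap_load_mbuf_sg().
 */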
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
		    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}

/* Modifiers used for Windows builds */
#define	__in
#define	__in_opt
#define	__in_ecount(_n)
#define	__in_ecount_opt(_n)
#define	__in_bcount(_n)
#define	__in_bcount_opt(_n)

#define	__out
#define	__out_opt
#define	__out_ecount(_n)
#define	__out_ecount_opt(_n)
#define	__out_bcount(_n)
#define	__out_bcount_opt(_n)

#define	__deref_out

#define	__inout
#define	__inout_opt
#define	__inout_ecount(_n)
#define	__inout_ecount_opt(_n)
#define	__inout_bcount(_n)
#define	__inout_bcount_opt(_n)
#define	__inout_bcount_full_opt(_n)

#define	__deref_out_bcount_opt(n)

#define	__checkReturn
#define	__success(_x)

#define	__drv_when(_p, _c)

/* Code inclusion options */


#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_FALCON 0
#define	EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0
#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1

#define	EFSYS_OPT_MAC_FALCON_GMAC 0
#define	EFSYS_OPT_MAC_FALCON_XMAC 0
#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_NULL 0
#define	EFSYS_OPT_MON_LM87 0
#define	EFSYS_OPT_MON_MAX6647 0
#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_NULL 0
#define	EFSYS_OPT_PHY_QT2022C2 0
#define	EFSYS_OPT_PHY_SFX7101 0
#define	EFSYS_OPT_PHY_TXC43128 0
#define	EFSYS_OPT_PHY_SFT9001 0
#define	EFSYS_OPT_PHY_QT2025C 0
#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_PHY_PROPS 0
#define	EFSYS_OPT_PHY_BIST 0
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_NVRAM_FALCON_BOOTROM 0
#define	EFSYS_OPT_NVRAM_SFT9001	0
#define	EFSYS_OPT_NVRAM_SFX7101	0
#define	EFSYS_OPT_BOOTCFG 0

#define	EFSYS_OPT_PCIE_TUNE 0
#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_WOL 1
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_MCAST_FILTER_LIST 1
#define	EFSYS_OPT_RX_SCATTER 0
#define	EFSYS_OPT_RX_HDR_SPLIT 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)						\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)				\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)						\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)				\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */
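
/*
 * When <sys/sdt.h> provides DTRACE_PROBEn(), each EFSYS_PROBEn() above
 * becomes the corresponding static DTrace probe (for example,
 * EFSYS_PROBE2(mem_readd, ...) expands to DTRACE_PROBE2(mem_readd, ...)).
 * If a given arity is not available, the probe falls back to the next
 * lower arity, dropping the trailing arguments; without DTRACE_PROBE
 * support the probes compile away entirely.
 */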

/* DMA */

typedef uint64_t		efsys_dma_addr_t;

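/*
 * DMA-mapped memory descriptor: the busdma tag and map used to allocate
 * the buffer, the kernel virtual address of the buffer (esm_base) and
 * its device (bus) address (esm_addr).
 */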
typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
} efsys_mem_t;


#define	EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void) memset((_esmp)->esm_base, 0, (_size));		\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_edp)->ed_u32[0] = *addr;				\
									\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u64[0] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u32[0] = *addr++;				\
		(_eqp)->eq_u32[1] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u64[0] = *addr++;				\
		(_eop)->eo_u64[1] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u32[0] = *addr++;				\
		(_eop)->eo_u32[1] = *addr++;				\
		(_eop)->eo_u32[2] = *addr++;				\
		(_eop)->eo_u32[3] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr = (_edp)->ed_u32[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr   = (_eqp)->eq_u64[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eqp)->eq_u32[0];				\
		*addr   = (_eqp)->eq_u32[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u64[0];				\
		*addr   = (_eop)->eo_u64[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u32[0];				\
		*addr++ = (_eop)->eo_u32[1];				\
		*addr++ = (_eop)->eo_u32[2];				\
		*addr   = (_eop)->eo_u32[3];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;
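
/*
 * esb_lock serializes BAR accesses that are built from multiple
 * bus_space operations, so that qword and oword reads and writes are
 * not interleaved with other accesses to the same registers.
 */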

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		snprintf((_esbp)->esb_lock_name,			\
			 sizeof((_esbp)->esb_lock_name),		\
			 "%s:bar", (_ifname));				\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
			 NULL, MTX_DEF);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)						\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)						\
	mtx_unlock(&(_esbp)->esb_lock)

#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 12);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the dword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_dword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u64[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u32[0]);			\
		/*							\
		 * The last dword must be written last, so barrier the	\
		 * entire qword to ensure that the writes above and	\
		 * the write below are not reordered.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware)
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		(void) (_esbp);						\
									\
		/* FIXME: Perform a 64-bit write */			\
		KASSERT(0, ("not implemented"));			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u64[0]);			\
		/*							\
		 * The last qword must be written last, so barrier the	\
		 * entire oword to ensure that the writes above and	\
		 * the write below are not reordered.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u64[1]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u32[0]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eop)->eo_u32[1]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u32[2]);			\
		/*							\
		 * The last dword must be written last, so barrier the	\
		 * entire oword to ensure that the writes above and	\
		 * the write below are not reordered.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 12, (_eop)->eo_u32[3]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* SPIN */

#define	EFSYS_SPIN(_us)							\
	do {								\
		DELAY(_us);						\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

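/*
 * Note that EFSYS_SLEEP is implemented as a busy-wait (EFSYS_SPIN, i.e.
 * DELAY()); it does not actually sleep.
 */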
#define	EFSYS_SLEEP	EFSYS_SPIN

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_POSTREAD);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_PREWRITE);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* TIMESTAMP */

typedef	clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)						\
	do {								\
		clock_t now;						\
									\
		now = ticks;						\
		*(_usp) = now * 1000000 / hz;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(_esip) = (_esip);					\
		/*							\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, holding a mutex.				\
		 */							\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		(void) (_size);						\
		free((_p), M_SFXGE);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
	do {								\
		efsys_lock_t *__eslp = (_eslp);				\
									\
		snprintf((__eslp)->lock_name,				\
			 sizeof((__eslp)->lock_name),			\
			 "%s:%s", (_ifname), (_label));			\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)						\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)					\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

#define	EFSYS_LOCK_MAGIC	0x000010c4

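/*
 * EFSYS_LOCK() stores EFSYS_LOCK_MAGIC in the caller's state variable and
 * EFSYS_UNLOCK() asserts that it is still there, catching unbalanced
 * lock/unlock usage.
 */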
#define	EFSYS_LOCK(_lockp, _state)					\
	do {								\
		SFXGE_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)					\
	do {								\
		if ((_state) != EFSYS_LOCK_MAGIC)			\
			KASSERT(B_FALSE, ("not locked"));		\
		SFXGE_EFSYS_UNLOCK(_lockp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* PREEMPT */

#define	EFSYS_PREEMPT_DISABLE(_state)					\
	do {								\
		(_state) = (_state);					\
		critical_enter();					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_PREEMPT_ENABLE(_state)					\
	do {								\
		(_state) = (_state);					\
		critical_exit();					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* STAT */

typedef uint64_t		efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* ERR */

extern void	sfxge_err(efsys_identifier_t *, unsigned int,
		    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* ASSERT */

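/*
 * Unlike KASSERT(9), these assertions are compiled in unconditionally and
 * panic even in kernels built without INVARIANTS.
 */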
#define	EFSYS_ASSERT(_exp) do {						\
	if (!(_exp))							\
		panic("%s", #_exp);					\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
	const _t __x = (_t)(_x);					\
	const _t __y = (_t)(_y);					\
	if (!(__x _op __y))						\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while (0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */