xref: /freebsd/sys/dev/ice/ice_osdep.h (revision 71d104536b513298902be65342afe6f3792f29e4)
/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2020, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_osdep.h
 * @brief OS compatibility layer
 *
 * Contains various definitions and functions which are part of an OS
 * compatibility layer for sharing code with other operating systems.
 */
#ifndef _ICE_OSDEP_H_
#define _ICE_OSDEP_H_

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/bus_dma.h>
#include <netinet/in.h>
#include <sys/counter.h>
#include <sys/sbuf.h>

#include "ice_alloc.h"

#define ICE_INTEL_VENDOR_ID 0x8086

#define ICE_STR_BUF_LEN 32

struct ice_hw;

device_t ice_hw_to_dev(struct ice_hw *hw);

/* configure hw->debug_mask to enable debug prints */
void ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...) __printflike(3, 4);
void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
		     uint32_t groupsize, uint8_t *buf, size_t len);

#define ice_info(_hw, _fmt, args...) \
	device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

#define ice_warn(_hw, _fmt, args...) \
	device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)
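
/*
 * Example (illustrative sketch, not compiled as part of this header): how the
 * logging wrappers above are typically used. The ICE_DBG_INIT mask is assumed
 * here for illustration; any bit that the caller also sets in hw->debug_mask
 * will enable the ice_debug() output.
 *
 *	ice_debug(hw, ICE_DBG_INIT, "admin queue initialized, len %d\n", len);
 *	ice_info(hw, "using %u MSI-X vectors\n", nvectors);
 *	ice_warn(hw, "firmware is older than expected\n");
 */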

#define DIVIDE_AND_ROUND_UP howmany
#define ROUND_UP roundup

uint32_t rd32(struct ice_hw *hw, uint32_t reg);
uint64_t rd64(struct ice_hw *hw, uint32_t reg);
void wr32(struct ice_hw *hw, uint32_t reg, uint32_t val);
void wr64(struct ice_hw *hw, uint32_t reg, uint64_t val);

#define ice_flush(_hw) rd32((_hw), GLGEN_STAT)
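
/*
 * Example (illustrative sketch, not compiled): a register write followed by a
 * flush so the write posts before continuing. ICE_EXAMPLE_REG is a
 * hypothetical offset standing in for a real definition from the register
 * headers; GLGEN_STAT, read by ice_flush(), is the status register used for
 * flushing.
 *
 *	wr32(hw, ICE_EXAMPLE_REG, val);
 *	ice_flush(hw);
 *	val = rd32(hw, ICE_EXAMPLE_REG);
 */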

MALLOC_DECLARE(M_ICE_OSDEP);

/**
 * ice_calloc - Allocate an array of elements
 * @hw: the hardware private structure
 * @count: number of elements to allocate
 * @size: the size of each element
 *
 * Allocate memory for an array of @count elements, each of @size bytes. Note
 * that the OS compatibility layer assumes all allocation functions will
 * provide zeroed memory.
 */
static inline void *
ice_calloc(struct ice_hw __unused *hw, size_t count, size_t size)
{
	return malloc(count * size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
}

/**
 * ice_malloc - Allocate memory of a specified size
 * @hw: the hardware private structure
 * @size: the size to allocate
 *
 * Allocates memory of the specified size. Note that the OS compatibility
 * layer assumes that all allocations will provide zeroed memory.
 */
static inline void *
ice_malloc(struct ice_hw __unused *hw, size_t size)
{
	return malloc(size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
}

/**
 * ice_memdup - Allocate a copy of some other memory
 * @hw: private hardware structure
 * @src: the source to copy from
 * @size: allocation size
 * @dir: the direction of copying
 *
 * Allocate memory of the specified size, and copy bytes from the src to fill
 * it. We don't need to zero this memory as we immediately initialize it by
 * copying from the src pointer.
 */
static inline void *
ice_memdup(struct ice_hw __unused *hw, const void *src, size_t size,
	   enum ice_memcpy_type __unused dir)
{
	void *dst = malloc(size, M_ICE_OSDEP, M_NOWAIT);

	if (dst != NULL)
		memcpy(dst, src, size);

	return dst;
}

/**
 * ice_free - Free previously allocated memory
 * @hw: the hardware private structure
 * @mem: pointer to the memory to free
 *
 * Free memory that was previously allocated by ice_calloc, ice_malloc, or
 * ice_memdup.
 */
static inline void
ice_free(struct ice_hw __unused *hw, void *mem)
{
	free(mem, M_ICE_OSDEP);
}
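
/*
 * Example (illustrative sketch, not compiled): a typical allocate/copy/free
 * cycle with the helpers above. The "struct foo" type and the element count
 * are hypothetical; ICE_NONDMA_TO_NONDMA and ICE_ERR_NO_MEMORY are assumed
 * from the shared code's enum ice_memcpy_type and status codes.
 *
 *	struct foo *orig, *copy;
 *
 *	orig = (struct foo *)ice_calloc(hw, 8, sizeof(*orig));
 *	if (orig == NULL)
 *		return (ICE_ERR_NO_MEMORY);
 *	copy = (struct foo *)ice_memdup(hw, orig, 8 * sizeof(*orig),
 *					ICE_NONDMA_TO_NONDMA);
 *	...
 *	ice_free(hw, copy);
 *	ice_free(hw, orig);
 */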

/* These are macros in order to drop the unused direction enumeration constant */
#define ice_memset(addr, c, len, unused) memset((addr), (c), (len))
#define ice_memcpy(dst, src, len, unused) memcpy((dst), (src), (len))

void ice_usec_delay(uint32_t time, bool sleep);
void ice_msec_delay(uint32_t time, bool sleep);
void ice_msec_pause(uint32_t time);
void ice_msec_spin(uint32_t time);

#define UNREFERENCED_PARAMETER(_p) _p = _p
#define UNREFERENCED_1PARAMETER(_p) do {			\
	UNREFERENCED_PARAMETER(_p);				\
} while (0)
#define UNREFERENCED_2PARAMETER(_p, _q) do {			\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
} while (0)
#define UNREFERENCED_3PARAMETER(_p, _q, _r) do {		\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
} while (0)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do {		\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
	UNREFERENCED_PARAMETER(_s);				\
} while (0)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do {	\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
	UNREFERENCED_PARAMETER(_s);				\
	UNREFERENCED_PARAMETER(_t);				\
} while (0)

#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define MAKEMASK(_m, _s) ((_m) << (_s))

#define LIST_HEAD_TYPE ice_list_head
#define LIST_ENTRY_TYPE ice_list_node

/**
 * @struct ice_list_node
 * @brief simplified linked list node API
 *
 * Represents a node in a linked list, which can be embedded into a structure
 * to allow that structure to be inserted into a linked list. Access to the
 * contained structure is done via __containerof.
 */
struct ice_list_node {
	LIST_ENTRY(ice_list_node) entries;
};

/**
 * @struct ice_list_head
 * @brief simplified linked list head API
 *
 * Represents the head of a linked list. The linked list should consist of
 * a series of ice_list_node structures embedded into another structure
 * accessed using __containerof. This way, the ice_list_head doesn't need to
 * know the type of the structure it contains.
 */
LIST_HEAD(ice_list_head, ice_list_node);

#define INIT_LIST_HEAD LIST_INIT
/* LIST_EMPTY doesn't need to be changed */
#define LIST_ADD(entry, head) LIST_INSERT_HEAD(head, entry, entries)
#define LIST_ADD_AFTER(entry, elem) LIST_INSERT_AFTER(elem, entry, entries)
#define LIST_DEL(entry) LIST_REMOVE(entry, entries)
#define _osdep_LIST_ENTRY(ptr, type, member) \
	__containerof(ptr, type, member)
#define LIST_FIRST_ENTRY(head, type, member) \
	_osdep_LIST_ENTRY(LIST_FIRST(head), type, member)
#define LIST_NEXT_ENTRY(ptr, unused, member) \
	_osdep_LIST_ENTRY(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
#define LIST_REPLACE_INIT(old_head, new_head) do {			\
	__typeof(new_head) _new_head = (new_head);			\
	LIST_INIT(_new_head);						\
	LIST_SWAP(old_head, _new_head, ice_list_node, entries);	\
} while (0)

#define LIST_ENTRY_SAFE(_ptr, _type, _member) \
({ __typeof(_ptr) ____ptr = (_ptr); \
   ____ptr ? _osdep_LIST_ENTRY(____ptr, _type, _member) : NULL; \
})

/**
 * ice_get_list_tail - Return the pointer to the last node in the list
 * @head: the pointer to the head of the list
 *
 * A helper function for implementing LIST_ADD_TAIL and LIST_LAST_ENTRY.
 * Returns the pointer to the last node in the list, or NULL if the list is
 * empty.
 *
 * Note: due to the list implementation this is O(N), where N is the size of
 * the list. An O(1) implementation requires replacing the underlying list
 * data structure with one that has a tail pointer. This is problematic,
 * because using a simple TAILQ would require that the addition and deletion
 * be given the head of the list.
 */
static inline struct ice_list_node *
ice_get_list_tail(struct ice_list_head *head)
{
	struct ice_list_node *node = LIST_FIRST(head);

	if (node == NULL)
		return NULL;
	while (LIST_NEXT(node, entries) != NULL)
		node = LIST_NEXT(node, entries);

	return node;
}

/* TODO: This is O(N). An O(1) implementation would require a different
 * underlying list structure, such as a circularly linked list. */
#define LIST_ADD_TAIL(entry, head) do {					\
	struct ice_list_node *node = ice_get_list_tail(head);		\
									\
	if (node == NULL) {						\
		LIST_ADD(entry, head);					\
	} else {							\
		LIST_INSERT_AFTER(node, entry, entries);		\
	}								\
} while (0)

#define LIST_LAST_ENTRY(head, type, member) \
	LIST_ENTRY_SAFE(ice_get_list_tail(head), type, member)

#define LIST_FIRST_ENTRY_SAFE(head, type, member) \
	LIST_ENTRY_SAFE(LIST_FIRST(head), type, member)

#define LIST_NEXT_ENTRY_SAFE(ptr, member) \
	LIST_ENTRY_SAFE(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)

#define LIST_FOR_EACH_ENTRY(pos, head, unused, member) \
	for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member);	\
	    pos;								\
	    pos = LIST_NEXT_ENTRY_SAFE(pos, member))

#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, unused, member) \
	for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member);	\
	     pos && ({ n = LIST_NEXT_ENTRY_SAFE(pos, member); 1; });		\
	     pos = n)
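
/*
 * Example (illustrative sketch, not compiled): embedding an ice_list_node in
 * a private structure and walking the list with the wrappers above. The
 * "struct foo" type and its fields are hypothetical.
 *
 *	struct foo {
 *		int val;
 *		struct ice_list_node node;
 *	};
 *
 *	struct ice_list_head head;
 *	struct foo *f, *tmp;
 *
 *	INIT_LIST_HEAD(&head);
 *	f = (struct foo *)ice_malloc(hw, sizeof(*f));
 *	LIST_ADD_TAIL(&f->node, &head);
 *
 *	LIST_FOR_EACH_ENTRY(f, &head, foo, node)
 *		f->val++;
 *
 *	LIST_FOR_EACH_ENTRY_SAFE(f, tmp, &head, foo, node) {
 *		LIST_DEL(&f->node);
 *		ice_free(hw, f);
 *	}
 */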

#define STATIC static

#define NTOHS ntohs
#define NTOHL ntohl
#define HTONS htons
#define HTONL htonl
#define LE16_TO_CPU le16toh
#define LE32_TO_CPU le32toh
#define LE64_TO_CPU le64toh
#define CPU_TO_LE16 htole16
#define CPU_TO_LE32 htole32
#define CPU_TO_LE64 htole64
#define CPU_TO_BE16 htobe16
#define CPU_TO_BE32 htobe32

#define SNPRINTF snprintf

/**
 * @typedef u8
 * @brief compatibility typedef for uint8_t
 */
typedef uint8_t  u8;

/**
 * @typedef u16
 * @brief compatibility typedef for uint16_t
 */
typedef uint16_t u16;

/**
 * @typedef u32
 * @brief compatibility typedef for uint32_t
 */
typedef uint32_t u32;

/**
 * @typedef u64
 * @brief compatibility typedef for uint64_t
 */
typedef uint64_t u64;

/**
 * @typedef s8
 * @brief compatibility typedef for int8_t
 */
typedef int8_t  s8;

/**
 * @typedef s16
 * @brief compatibility typedef for int16_t
 */
typedef int16_t s16;

/**
 * @typedef s32
 * @brief compatibility typedef for int32_t
 */
typedef int32_t s32;

/**
 * @typedef s64
 * @brief compatibility typedef for int64_t
 */
typedef int64_t s64;

#define __le16 u16
#define __le32 u32
#define __le64 u64
#define __be16 u16
#define __be32 u32
#define __be64 u64

#define ice_hweight8(x) bitcount16((u8)x)
#define ice_hweight16(x) bitcount16(x)
#define ice_hweight32(x) bitcount32(x)
#define ice_hweight64(x) bitcount64(x)

/**
 * @struct ice_dma_mem
 * @brief DMA memory allocation
 *
 * Contains DMA allocation bits, used to simplify DMA allocations.
 */
struct ice_dma_mem {
	void *va;
	uint64_t pa;
	size_t size;

	bus_dma_tag_t		tag;
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
};


void * ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size);
void ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem);
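
/*
 * Example (illustrative sketch, not compiled): allocating and releasing a
 * DMA-coherent buffer, e.g. for a control queue ring. This assumes
 * ice_alloc_dma_mem() returns the kernel virtual address (mem->va), or NULL
 * on failure, and fills in the bus address in mem->pa; ring_len and the
 * ICE_ERR_NO_MEMORY status are placeholders.
 *
 *	struct ice_dma_mem ring_mem;
 *	void *desc;
 *
 *	desc = ice_alloc_dma_mem(hw, &ring_mem, ring_len);
 *	if (desc == NULL)
 *		return (ICE_ERR_NO_MEMORY);
 *	// hand ring_mem.pa to the hardware; the CPU uses desc (== ring_mem.va)
 *	...
 *	ice_free_dma_mem(hw, &ring_mem);
 */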

/**
 * @struct ice_lock
 * @brief simplified lock API
 *
 * Contains a simple lock implementation used to lock various resources.
 */
struct ice_lock {
	struct mtx mutex;
	char name[ICE_STR_BUF_LEN];
};

extern u16 ice_lock_count;

/**
 * ice_init_lock - Initialize a lock for use
 * @lock: the lock memory to initialize
 *
 * OS compatibility layer to provide a simple locking mechanism. We use
 * a mutex for this purpose.
 */
static inline void
ice_init_lock(struct ice_lock *lock)
{
	/*
	 * Make each lock unique by incrementing a counter each time this
	 * function is called. Use of a u16 allows 65536 unique lock names
	 * before we'd hit a duplicate.
	 */
	memset(lock->name, 0, sizeof(lock->name));
	snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
	mtx_init(&lock->mutex, lock->name, NULL, MTX_DEF);
}

/**
 * ice_acquire_lock - Acquire the lock
 * @lock: the lock to acquire
 *
 * Acquires the mutex specified by the lock pointer.
 */
static inline void
ice_acquire_lock(struct ice_lock *lock)
{
	mtx_lock(&lock->mutex);
}

/**
 * ice_release_lock - Release the lock
 * @lock: the lock to release
 *
 * Releases the mutex specified by the lock pointer.
 */
static inline void
ice_release_lock(struct ice_lock *lock)
{
	mtx_unlock(&lock->mutex);
}

/**
 * ice_destroy_lock - Destroy the lock to de-allocate it
 * @lock: the lock to destroy
 *
 * Destroys a previously initialized lock. We only do this if the mutex was
 * previously initialized.
 */
static inline void
ice_destroy_lock(struct ice_lock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
	memset(lock->name, 0, sizeof(lock->name));
}
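
/*
 * Example (illustrative sketch, not compiled): the expected lifecycle of an
 * ice_lock, typically embedded in a structure that outlives every
 * acquire/release pair.
 *
 *	struct ice_lock lk;
 *
 *	ice_init_lock(&lk);
 *
 *	ice_acquire_lock(&lk);
 *	// ... touch the protected resource ...
 *	ice_release_lock(&lk);
 *
 *	ice_destroy_lock(&lk);
 */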

/* Some function parameters are unused outside of MPASS/KASSERT macros. Rather
 * than marking these as __unused all the time, mark them as __invariant_only,
 * and define this to __unused when INVARIANTS is disabled. Otherwise, define
 * it empty so that __invariant_only parameters are caught as unused by the
 * INVARIANTS build.
 */
#ifndef INVARIANTS
#define __invariant_only __unused
#else
#define __invariant_only
#endif

#define __ALWAYS_UNUSED __unused

/**
 * ice_ilog2 - Calculate the integer log base 2 of a 64bit value
 * @n: 64bit number
 *
 * Calculates the integer log base 2 of a 64bit value, rounded down.
 *
 * @remark The integer log base 2 of zero is technically undefined, but this
 * function will return 0 in that case.
 *
 */
static inline int
ice_ilog2(u64 n) {
	if (n == 0)
		return 0;
	return flsll(n) - 1;
}
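
/*
 * Worked examples (illustrative): ice_ilog2(1) == 0, ice_ilog2(2) == 1,
 * ice_ilog2(63) == 5, ice_ilog2(64) == 6, and, per the remark above,
 * ice_ilog2(0) == 0 even though log base 2 of zero is undefined.
 */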

/**
 * ice_is_pow2 - Check if the value is a power of 2
 * @n: 64bit number
 *
 * Check if the given value is a power of 2.
 *
 * @remark FreeBSD's powerof2 function treats zero as a power of 2, while this
 * function does not.
 *
 * @returns true or false
 */
static inline bool
ice_is_pow2(u64 n) {
	if (n == 0)
		return false;
	return powerof2(n);
}
#endif /* _ICE_OSDEP_H_ */