xref: /freebsd/sys/dev/ice/ice_osdep.h (revision 7ee6b0f125a092ed99d327bb8d608dd2ff77b7aa)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2023, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 /**
34  * @file ice_osdep.h
35  * @brief OS compatibility layer
36  *
37  * Contains various definitions and functions which are part of an OS
38  * compatibility layer for sharing code with other operating systems.
39  */
40 #ifndef _ICE_OSDEP_H_
41 #define _ICE_OSDEP_H_
42 
43 #include <sys/endian.h>
44 #include <sys/param.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <sys/bus_dma.h>
54 #include <netinet/in.h>
55 #include <sys/counter.h>
56 #include <sys/sbuf.h>
57 
58 #include "ice_alloc.h"
59 
#define ICE_INTEL_VENDOR_ID 0x8086	/* PCI vendor ID for Intel devices */

#define ICE_STR_BUF_LEN 32		/* length of fixed-size name buffers (e.g. lock names) */

struct ice_hw;

/* Obtain the device_t associated with a shared-code ice_hw structure */
device_t ice_hw_to_dev(struct ice_hw *hw);

/* configure hw->debug_mask to enable debug prints */
void ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...) __printflike(3, 4);
/* row/group formatted buffer dumps for debug and firmware-log output;
 * implemented in ice_osdep.c
 */
void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
		     uint32_t groupsize, uint8_t *buf, size_t len);
void ice_info_fwlog(struct ice_hw *hw, uint32_t rowsize, uint32_t groupsize,
		    uint8_t *buf, size_t len);

/* find-last-set (highest set bit, 1-based) for long values */
#define ice_fls(_n) flsl(_n)

/* Both severities map to device_printf; kept as separate macros for the
 * benefit of shared code that distinguishes info from warning prints.
 */
#define ice_info(_hw, _fmt, args...) \
	device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

#define ice_warn(_hw, _fmt, args...) \
	device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

/* Map shared-code rounding helpers onto the sys/param.h equivalents */
#define DIVIDE_AND_ROUND_UP howmany
#define ROUND_UP roundup

/* MMIO register accessors, implemented in ice_osdep.c */
uint32_t rd32(struct ice_hw *hw, uint32_t reg);
uint64_t rd64(struct ice_hw *hw, uint32_t reg);
void wr32(struct ice_hw *hw, uint32_t reg, uint32_t val);
void wr64(struct ice_hw *hw, uint32_t reg, uint64_t val);

/* Flush posted register writes by reading a device register back */
#define ice_flush(_hw) rd32((_hw), GLGEN_STAT)

/* malloc(9) type used for all osdep-layer allocations */
MALLOC_DECLARE(M_ICE_OSDEP);
94 
95 /**
96  * ice_calloc - Allocate an array of elementes
97  * @hw: the hardware private structure
98  * @count: number of elements to allocate
99  * @size: the size of each element
100  *
101  * Allocate memory for an array of items equal to size. Note that the OS
102  * compatibility layer assumes all allocation functions will provide zero'd
103  * memory.
104  */
105 static inline void *
106 ice_calloc(struct ice_hw __unused *hw, size_t count, size_t size)
107 {
108 	return malloc(count * size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
109 }
110 
111 /**
112  * ice_malloc - Allocate memory of a specified size
113  * @hw: the hardware private structure
114  * @size: the size to allocate
115  *
116  * Allocates memory of the specified size. Note that the OS compatibility
117  * layer assumes that all allocations will provide zero'd memory.
118  */
119 static inline void *
120 ice_malloc(struct ice_hw __unused *hw, size_t size)
121 {
122 	return malloc(size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
123 }
124 
125 /**
126  * ice_memdup - Allocate a copy of some other memory
127  * @hw: private hardware structure
128  * @src: the source to copy from
129  * @size: allocation size
130  * @dir: the direction of copying
131  *
132  * Allocate memory of the specified size, and copy bytes from the src to fill
133  * it. We don't need to zero this memory as we immediately initialize it by
134  * copying from the src pointer.
135  */
136 static inline void *
137 ice_memdup(struct ice_hw __unused *hw, const void *src, size_t size,
138 	   enum ice_memcpy_type __unused dir)
139 {
140 	void *dst = malloc(size, M_ICE_OSDEP, M_NOWAIT);
141 
142 	if (dst != NULL)
143 		memcpy(dst, src, size);
144 
145 	return dst;
146 }
147 
/**
 * ice_free - Free previously allocated memory
 * @hw: the hardware private structure
 * @mem: pointer to the memory to free
 *
 * Free memory that was previously allocated by ice_calloc, ice_malloc, or
 * ice_memdup. Passing NULL is safe: free(9) treats it as a no-op.
 */
static inline void
ice_free(struct ice_hw __unused *hw, void *mem)
{
	free(mem, M_ICE_OSDEP);
}
161 
/* These are macros in order to drop the unused direction enumeration constant */
#define ice_memset(addr, c, len, unused) memset((addr), (c), (len))
#define ice_memcpy(dst, src, len, unused) memcpy((dst), (src), (len))

/* Delay helpers, implemented in ice_osdep.c. The 'sleep' flag selects
 * between sleeping and busy-waiting; ice_msec_pause/ice_msec_spin are
 * presumably the explicit sleeping and spinning variants — see ice_osdep.c
 * for the exact semantics.
 */
void ice_usec_delay(uint32_t time, bool sleep);
void ice_msec_delay(uint32_t time, bool sleep);
void ice_msec_pause(uint32_t time);
void ice_msec_spin(uint32_t time);

/* Self-assignment quiets "set but not used" warnings for parameters the
 * shared code deliberately ignores.
 */
#define UNREFERENCED_PARAMETER(_p) _p = _p
#define UNREFERENCED_1PARAMETER(_p) do {			\
	UNREFERENCED_PARAMETER(_p);				\
} while (0)
#define UNREFERENCED_2PARAMETER(_p, _q) do {			\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
} while (0)
#define UNREFERENCED_3PARAMETER(_p, _q, _r) do {		\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
} while (0)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do {		\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
	UNREFERENCED_PARAMETER(_s);				\
} while (0)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do {	\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
	UNREFERENCED_PARAMETER(_s);				\
	UNREFERENCED_PARAMETER(_t);				\
} while (0)

/* sizeof a struct member without requiring an instance of the struct */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* Element count of a true array; invalid when applied to a pointer */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Build a register field mask by shifting _m left by _s bits */
#define MAKEMASK(_m, _s) ((_m) << (_s))

/* Names of the list head/node types the shared code expects */
#define LIST_HEAD_TYPE ice_list_head
#define LIST_ENTRY_TYPE ice_list_node
204 
/**
 * @struct ice_list_node
 * @brief simplified linked list node API
 *
 * Represents a node in a linked list, which can be embedded into a structure
 * to allow that structure to be inserted into a linked list. Access to the
 * contained structure is done via __containerof
 */
struct ice_list_node {
	LIST_ENTRY(ice_list_node) entries;
};

/**
 * @struct ice_list_head
 * @brief simplified linked list head API
 *
 * Represents the head of a linked list. The linked list should consist of
 * a series of ice_list_node structures embedded into another structure
 * accessed using __containerof. This way, the ice_list_head doesn't need to
 * know the type of the structure it contains.
 */
LIST_HEAD(ice_list_head, ice_list_node);

#define INIT_LIST_HEAD LIST_INIT
/* LIST_EMPTY doesn't need to be changed */
/* Note: the shared-code macros take (entry, head), the reverse of the
 * sys/queue.h argument order they wrap.
 */
#define LIST_ADD(entry, head) LIST_INSERT_HEAD(head, entry, entries)
#define LIST_ADD_AFTER(entry, elem) LIST_INSERT_AFTER(elem, entry, entries)
#define LIST_DEL(entry) LIST_REMOVE(entry, entries)
/* Recover the containing structure from an embedded ice_list_node pointer */
#define _osdep_LIST_ENTRY(ptr, type, member) \
	__containerof(ptr, type, member)
#define LIST_FIRST_ENTRY(head, type, member) \
	_osdep_LIST_ENTRY(LIST_FIRST(head), type, member)
#define LIST_NEXT_ENTRY(ptr, unused, member) \
	_osdep_LIST_ENTRY(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
/* Move the contents of old_head into new_head, leaving old_head empty */
#define LIST_REPLACE_INIT(old_head, new_head) do {			\
	__typeof(new_head) _new_head = (new_head);			\
	LIST_INIT(_new_head);						\
	LIST_SWAP(old_head, _new_head, ice_list_node, entries);		\
} while (0)

/* NULL-safe __containerof: evaluates _ptr once and yields NULL when the
 * node pointer is NULL instead of a bogus offset from NULL.
 */
#define LIST_ENTRY_SAFE(_ptr, _type, _member) \
({ __typeof(_ptr) ____ptr = (_ptr); \
   ____ptr ? _osdep_LIST_ENTRY(____ptr, _type, _member) : NULL; \
})
249 
250 /**
251  * ice_get_list_tail - Return the pointer to the last node in the list
252  * @head: the pointer to the head of the list
253  *
254  * A helper function for implementing LIST_ADD_TAIL and LIST_LAST_ENTRY.
255  * Returns the pointer to the last node in the list, or NULL of the list is
256  * empty.
257  *
258  * Note: due to the list implementation this is O(N), where N is the size of
259  * the list. An O(1) implementation requires replacing the underlying list
260  * datastructure with one that has a tail pointer. This is problematic,
261  * because using a simple TAILQ would require that the addition and deletion
262  * be given the head of the list.
263  */
264 static inline struct ice_list_node *
265 ice_get_list_tail(struct ice_list_head *head)
266 {
267 	struct ice_list_node *node = LIST_FIRST(head);
268 
269 	if (node == NULL)
270 		return NULL;
271 	while (LIST_NEXT(node, entries) != NULL)
272 		node = LIST_NEXT(node, entries);
273 
274 	return node;
275 }
276 
/* TODO: This is O(N). An O(1) implementation would require a different
 * underlying list structure, such as a circularly linked list. */
#define LIST_ADD_TAIL(entry, head) do {					\
	struct ice_list_node *node = ice_get_list_tail(head);		\
									\
	if (node == NULL) {						\
		LIST_ADD(entry, head);					\
	} else {							\
		LIST_INSERT_AFTER(node, entry, entries);		\
	}								\
} while (0)

/* NULL-safe entry accessors: each yields NULL rather than a bogus
 * container pointer when the underlying node pointer is NULL.
 */
#define LIST_LAST_ENTRY(head, type, member) \
	LIST_ENTRY_SAFE(ice_get_list_tail(head), type, member)

#define LIST_FIRST_ENTRY_SAFE(head, type, member) \
	LIST_ENTRY_SAFE(LIST_FIRST(head), type, member)

#define LIST_NEXT_ENTRY_SAFE(ptr, member) \
	LIST_ENTRY_SAFE(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)

/* Iterate over each containing structure in the list; 'pos' is NULL after
 * the loop completes normally.
 */
#define LIST_FOR_EACH_ENTRY(pos, head, unused, member) \
	for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member);		\
	    pos;								\
	    pos = LIST_NEXT_ENTRY_SAFE(pos, member))

/* As LIST_FOR_EACH_ENTRY, but caches the next entry in 'n' first, so the
 * body may safely remove (or free) 'pos'.
 */
#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, unused, member) \
	for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member);		\
	     pos && ({ n = LIST_NEXT_ENTRY_SAFE(pos, member); 1; });		\
	     pos = n)
307 
/* Shared code spells file-local linkage as STATIC */
#define STATIC static

/* Byte-order conversion wrappers mapped onto the sys/endian.h and
 * netinet/in.h primitives.
 */
#define NTOHS ntohs
#define NTOHL ntohl
#define HTONS htons
#define HTONL htonl
#define LE16_TO_CPU le16toh
#define LE32_TO_CPU le32toh
#define LE64_TO_CPU le64toh
#define CPU_TO_LE16 htole16
#define CPU_TO_LE32 htole32
#define CPU_TO_LE64 htole64
#define CPU_TO_BE16 htobe16
#define CPU_TO_BE32 htobe32

#define SNPRINTF snprintf
324 
/**
 * @typedef u8
 * @brief compatibility typedef for uint8_t
 */
typedef uint8_t  u8;

/**
 * @typedef u16
 * @brief compatibility typedef for uint16_t
 */
typedef uint16_t u16;

/**
 * @typedef u32
 * @brief compatibility typedef for uint32_t
 */
typedef uint32_t u32;

/**
 * @typedef u64
 * @brief compatibility typedef for uint64_t
 */
typedef uint64_t u64;

/**
 * @typedef s8
 * @brief compatibility typedef for int8_t
 */
typedef int8_t  s8;

/**
 * @typedef s16
 * @brief compatibility typedef for int16_t
 */
typedef int16_t s16;

/**
 * @typedef s32
 * @brief compatibility typedef for int32_t
 */
typedef int32_t s32;

/**
 * @typedef s64
 * @brief compatibility typedef for int64_t
 */
typedef int64_t s64;

/* Linux-style endian-annotated types; here they are plain fixed-width
 * integers with no sparse-style annotation.
 */
#define __le16 u16
#define __le32 u32
#define __le64 u64
#define __be16 u16
#define __be32 u32
#define __be64 u64

/* Population count (Hamming weight) wrappers over the libkern bitcount
 * routines; the u8 variant widens its argument for bitcount16.
 */
#define ice_hweight8(x) bitcount16((u8)x)
#define ice_hweight16(x) bitcount16(x)
#define ice_hweight32(x) bitcount32(x)
#define ice_hweight64(x) bitcount64(x)
384 
/**
 * @struct ice_dma_mem
 * @brief DMA memory allocation
 *
 * Contains DMA allocation bits, used to simplify DMA allocations.
 */
struct ice_dma_mem {
	void *va;	/* kernel virtual address of the buffer */
	uint64_t pa;	/* bus address of the buffer, used by the hardware */
	size_t size;	/* size of the allocation in bytes */

	bus_dma_tag_t		tag;	/* busdma tag for this allocation */
	bus_dmamap_t		map;	/* busdma map for this allocation */
	bus_dma_segment_t	seg;	/* segment descriptor; NOTE(review):
					 * single segment assumed — see
					 * ice_osdep.c */
};


/* DMA allocation/release routines, implemented in ice_osdep.c */
void * ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size);
void ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem);
404 
/**
 * @struct ice_lock
 * @brief simplified lock API
 *
 * Contains a simple lock implementation used to lock various resources.
 */
struct ice_lock {
	struct mtx mutex;		/* backing mtx(9) mutex */
	char name[ICE_STR_BUF_LEN];	/* unique name set by ice_init_lock */
};

/* Monotonic counter used by ice_init_lock to generate unique lock names */
extern u16 ice_lock_count;
417 
418 /**
419  * ice_init_lock - Initialize a lock for use
420  * @lock: the lock memory to initialize
421  *
422  * OS compatibility layer to provide a simple locking mechanism. We use
423  * a mutex for this purpose.
424  */
425 static inline void
426 ice_init_lock(struct ice_lock *lock)
427 {
428 	/*
429 	 * Make each lock unique by incrementing a counter each time this
430 	 * function is called. Use of a u16 allows 65535 possible locks before
431 	 * we'd hit a duplicate.
432 	 */
433 	memset(lock->name, 0, sizeof(lock->name));
434 	snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
435 	mtx_init(&lock->mutex, lock->name, NULL, MTX_DEF);
436 }
437 
/**
 * ice_acquire_lock - Acquire the lock
 * @lock: the lock to acquire
 *
 * Acquires the mutex specified by the lock pointer, sleeping until it is
 * available if contended (MTX_DEF semantics).
 */
static inline void
ice_acquire_lock(struct ice_lock *lock)
{
	mtx_lock(&lock->mutex);
}
449 
/**
 * ice_release_lock - Release the lock
 * @lock: the lock to release
 *
 * Releases the mutex specified by the lock pointer. The caller must hold
 * the mutex.
 */
static inline void
ice_release_lock(struct ice_lock *lock)
{
	mtx_unlock(&lock->mutex);
}
461 
/**
 * ice_destroy_lock - Destroy the lock to de-allocate it
 * @lock: the lock to destroy
 *
 * Destroys a previously initialized lock. We only do this if the mutex was
 * previously initialized, so destroying an ice_lock that was never passed
 * to ice_init_lock is safe. The name buffer is cleared afterwards so the
 * lock reads as uninitialized.
 */
static inline void
ice_destroy_lock(struct ice_lock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
	memset(lock->name, 0, sizeof(lock->name));
}
476 
/* Some function parameters are unused outside of MPASS/KASSERT macros. Rather
 * than marking these as __unused all the time, mark them as __invariant_only,
 * and define this to __unused when INVARIANTS is disabled. Otherwise, define
 * it empty so that __invariant_only parameters are caught as unused by the
 * INVARIANTS build.
 */
#ifndef INVARIANTS
#define __invariant_only __unused
#else
#define __invariant_only
#endif

/* Unconditionally unused, regardless of build options */
#define __ALWAYS_UNUSED __unused
490 
491 /**
492  * ice_ilog2 - Calculate the integer log base 2 of a 64bit value
493  * @n: 64bit number
494  *
495  * Calculates the integer log base 2 of a 64bit value, rounded down.
496  *
497  * @remark The integer log base 2 of zero is technically undefined, but this
498  * function will return 0 in that case.
499  *
500  */
501 static inline int
502 ice_ilog2(u64 n) {
503 	if (n == 0)
504 		return 0;
505 	return flsll(n) - 1;
506 }
507 
508 /**
509  * ice_is_pow2 - Check if the value is a power of 2
510  * @n: 64bit number
511  *
512  * Check if the given value is a power of 2.
513  *
514  * @remark FreeBSD's powerof2 function treats zero as a power of 2, while this
515  * function does not.
516  *
517  * @returns true or false
518  */
519 static inline bool
520 ice_is_pow2(u64 n) {
521 	if (n == 0)
522 		return false;
523 	return powerof2(n);
524 }
525 #endif /* _ICE_OSDEP_H_ */
526