/*
 * Copyright (c) 2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __BNXT_RE_MEMORY_H__
#define __BNXT_RE_MEMORY_H__

#include <pthread.h>

#include "main.h"

struct bnxt_re_mem {
	void *va_head;
	void *va_tail;
	uint32_t head;
	uint32_t tail;
	uint32_t size;
	uint32_t pad;
};

#define BNXT_RE_QATTR_SQ_INDX	0
#define BNXT_RE_QATTR_RQ_INDX	1
struct bnxt_re_qattr {
	uint32_t esize;
	uint32_t slots;
	uint32_t nwr;
	uint32_t sz_ring;
	uint32_t sz_shad;
};

/* spin lock wrapper struct */
struct bnxt_spinlock {
	pthread_spinlock_t lock;
	int in_use;
	int need_lock;
};

struct bnxt_re_queue {
	struct bnxt_spinlock qlock;
	uint32_t flags;
	uint32_t *dbtail;
	void *va;
	uint32_t head;
	uint32_t depth; /* number of entries */
	void *pad;
	uint32_t pad_stride_log2;
	uint32_t tail;
	uint32_t max_slots;
	/* Difference between the real queue depth allocated in HW and the
	 * depth requested by the user; used to correctly flag the queue-full
	 * condition based on the user-supplied depth. The value varies with
	 * the queue type and with any HW requirement that mandates keeping
	 * a fixed gap between the producer and consumer indices.
	 */
	uint32_t diff;
	uint32_t stride;
	uint32_t msn;
	uint32_t msn_tbl_sz;
};
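
/*
 * Illustrative arithmetic, not from the driver: with depth = 64 and
 * diff = 1, the full check in bnxt_re_is_que_full() below keeps
 * diff + 1 = 2 slots unused, so at most depth - diff - 1 = 62
 * single-slot requests can be outstanding at once.
 */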

static inline unsigned long get_aligned(uint32_t size, uint32_t al_size)
{
	return (unsigned long) (size + al_size - 1) & ~(al_size - 1);
}
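
/*
 * get_aligned() rounds 'size' up to the next multiple of 'al_size', which
 * must be a power of two for the mask to work: e.g., get_aligned(100, 64)
 * == 128 and get_aligned(4096, 4096) == 4096.
 */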

static inline unsigned long roundup_pow_of_two(unsigned long val)
{
	unsigned long roundup = 1;

	/* an input of 1 rounds up to 2 here, not 1 */
	if (val == 1)
		return (roundup << 1);

	while (roundup < val)
		roundup <<= 1;

	return roundup;
}
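
/* e.g., roundup_pow_of_two(3) == 4 and roundup_pow_of_two(8) == 8 */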

#define iowrite64(dst, val)	(*((volatile __u64 *) (dst)) = (val))
#define iowrite32(dst, val)	(*((volatile __u32 *) (dst)) = (val))
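
/*
 * Note: these macros expand to single volatile stores and provide no
 * ordering guarantees of their own; callers that need ordering (e.g.,
 * before ringing a doorbell) must issue an explicit barrier first.
 */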

/* Basic queue operation */
static inline void *bnxt_re_get_hwqe(struct bnxt_re_queue *que, uint32_t idx)
{
	idx += que->tail;
	if (idx >= que->depth)
		idx -= que->depth;
	/* '<< 4' below assumes a fixed 16-byte hardware slot size */
	return (void *)(que->va + (idx << 4));
}

static inline void *bnxt_re_get_hwqe_hdr(struct bnxt_re_queue *que)
{
	return (void *)(que->va + ((que->tail) << 4));
}

static inline uint32_t bnxt_re_is_que_full(struct bnxt_re_queue *que,
					   uint32_t slots)
{
	int32_t avail, head, tail;

	head = que->head;
	tail = que->tail;
	avail = head - tail;
	if (head <= tail)
		avail += que->depth;
	return avail <= (slots + que->diff);
}
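
/*
 * Worked example: depth = 8, head = 2, tail = 6 gives avail = 2 - 6 + 8 = 4;
 * with diff = 0, a request for 4 slots is refused (4 <= 4) while a request
 * for 3 slots is allowed.
 */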

static inline uint32_t bnxt_re_is_que_empty(struct bnxt_re_queue *que)
{
	return que->tail == que->head;
}

static inline void bnxt_re_incr_tail(struct bnxt_re_queue *que, uint8_t cnt)
{
	que->tail += cnt;
	if (que->tail >= que->depth) {
		que->tail %= que->depth;
		/* Rolled over; toggle the tail epoch bit in flags */
		que->flags ^= 1UL << BNXT_RE_FLAG_EPOCH_TAIL_SHIFT;
	}
}

static inline void bnxt_re_incr_head(struct bnxt_re_queue *que, uint8_t cnt)
{
	que->head += cnt;
	if (que->head >= que->depth) {
		que->head %= que->depth;
		/* Rolled over; toggle the head epoch bit in flags */
		que->flags ^= 1UL << BNXT_RE_FLAG_EPOCH_HEAD_SHIFT;
	}
}
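
/*
 * Illustrative only, a minimal sketch and not part of the driver API:
 * one producer step stitching the helpers above together. The caller is
 * assumed to hold qlock and to fill the returned slot before ringing the
 * doorbell; bnxt_re_post_example() is a hypothetical name for this sketch.
 */
static inline void *bnxt_re_post_example(struct bnxt_re_queue *que,
					 uint8_t slots)
{
	void *wqe;

	if (bnxt_re_is_que_full(que, slots))
		return NULL;		/* not enough room for 'slots' */
	wqe = bnxt_re_get_hwqe(que, 0);	/* slot at the current tail */
	bnxt_re_incr_tail(que, slots);	/* publish; may toggle the epoch bit */
	return wqe;
}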

void bnxt_re_free_mem(struct bnxt_re_mem *mem);
void *bnxt_re_alloc_mem(size_t size, uint32_t pg_size);
void *bnxt_re_get_obj(struct bnxt_re_mem *mem, size_t req);
void *bnxt_re_get_ring(struct bnxt_re_mem *mem, size_t req);
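
/*
 * Illustrative only (hypothetical caller, not driver code): a sketch of
 * the presumed chunk lifecycle, assuming bnxt_re_alloc_mem() returns a
 * struct bnxt_re_mem * and that bnxt_re_get_ring()/bnxt_re_get_obj()
 * carve sub-allocations out of that chunk, returning NULL on exhaustion.
 */
static inline int bnxt_re_mem_example(void)
{
	struct bnxt_re_mem *mem;
	void *ring, *shad;

	/* one chunk sized for a ring plus its shadow/bookkeeping area */
	mem = bnxt_re_alloc_mem(2 * 4096, 4096);
	if (!mem)
		return -1;
	ring = bnxt_re_get_ring(mem, 4096);
	shad = bnxt_re_get_obj(mem, 4096);
	bnxt_re_free_mem(mem);	/* releases the chunk and both carvings */
	return (ring && shad) ? 0 : -1;
}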

#endif