/*
 * drivers/net/ethernet/mellanox/mlxsw/item.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H

#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>

struct mlxsw_item {
	unsigned short	offset;		/* bytes in container */
	unsigned short	step;		/* step in bytes for indexed items */
	unsigned short	in_step_offset; /* offset within one step */
	unsigned char	shift;		/* shift in bits */
	unsigned char	element_size;	/* size of element in bit array */
	bool		no_real_shift;	/* value kept shifted, not at bit 0 */
	union {
		unsigned char	bits;
		unsigned short	bytes;
	} size;
	const char	*name;
};

static inline unsigned int
__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
		    size_t typesize)
{
	BUG_ON(index && !item->step);
	if (item->offset % typesize != 0 ||
	    item->step % typesize != 0 ||
	    item->in_step_offset % typesize != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
		       item->name, item->offset, item->step,
		       item->in_step_offset, typesize);
		BUG();
	}

	return ((item->offset + item->step * index + item->in_step_offset) /
		typesize);
}
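
/* Worked example (hypothetical values): an indexed item with
 * offset = 0x10, step = 0x08 and in_step_offset = 0x04, read at index 2
 * through a u32, lives at byte 0x10 + 0x08 * 2 + 0x04 = 0x24, so
 * __mlxsw_item_offset() returns 0x24 / sizeof(u32) = 9, the index into
 * the __be32 array overlaying the buffer.
 */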

static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 tmp;

	tmp = be16_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
				      unsigned short index, u16 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u16 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be16_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be16(tmp);
}
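
/* The 32-bit and 64-bit accessors below follow the same pattern as the
 * 16-bit ones: get converts from big endian, shifts the field down and
 * masks it to size.bits; set does a read-modify-write that touches only
 * the field's bits. For example, a field with shift = 4 and
 * size.bits = 3 stored in a big-endian word 0x00f0 reads back as
 * (0xf0 >> 4) & GENMASK(2, 0) = 0x7.
 */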

static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 tmp;

	tmp = be32_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
				      unsigned short index, u32 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u32 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be32_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be32(tmp);
}

static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 tmp;

	tmp = be64_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
				      unsigned short index, u64 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
	u64 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be64_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be64(tmp);
}

static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
					    struct mlxsw_item *item,
					    unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(dst, &buf[offset], item->size.bytes);
}

static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
					  struct mlxsw_item *item,
					  unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(&buf[offset], src, item->size.bytes);
}

static inline u16
__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset;		/* byte offset inside the array */
	u8 in_byte_index;

	BUG_ON(index && !item->element_size);
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	max_index = (item->size.bytes << 3) / item->element_size - 1;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}
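
/* Worked example (hypothetical values): with size.bytes = 4 and
 * element_size = 2 the array holds 16 two-bit elements. Index 0 maps to
 * be_index = 15, i.e. byte offset (15 * 2) >> 3 = 3 (the last byte of
 * the array) with *shift = 0: logical indices count from the least
 * significant element of the big-endian array.
 */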

static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
					    u16 index)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);

	tmp = buf[offset];
	tmp >>= shift;
	tmp &= GENMASK(item->element_size - 1, 0);
	return tmp;
}

static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
					      u16 index, u8 val)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
	u8 mask = GENMASK(item->element_size - 1, 0) << shift;

	val <<= shift;
	val &= mask;
	tmp = buf[offset];
	tmp &= ~mask;
	tmp |= val;
	buf[offset] = tmp;
}

#define __ITEM_NAME(_type, _cname, _iname)					\
	mlxsw_##_type##_##_cname##_##_iname##_item

/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */
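
/* For example, a hypothetical 8-bit field "swid" occupying bits 31:24 of
 * dword 0x00 in a register "xxxx" could be declared as
 *
 *	MLXSW_ITEM32(reg, xxxx, swid, 0x00, 24, 8);
 *
 * which emits mlxsw_reg_xxxx_swid_get(buf) and
 * mlxsw_reg_xxxx_swid_set(buf, val) accessors for that field.
 */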

#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}

#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16								\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u16 val)				\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}

#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}

#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32								\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u32 val)				\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
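
/* A hypothetical indexed declaration such as
 *
 *	MLXSW_ITEM32_INDEXED(reg, xxxx, rate, 0x10, 0, 16, 0x04, 0x00, false);
 *
 * describes a 16-bit field repeated every 4 bytes from offset 0x10;
 * mlxsw_reg_xxxx_rate_get(buf, index) then reads the copy at byte offset
 * 0x10 + 0x04 * index.
 */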

#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}

#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
			     _sizebits, _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64								\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u64 val)				\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}

#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)		\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)	\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), 0);		\
}
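
/* A hypothetical buffer declaration such as
 *
 *	MLXSW_ITEM_BUF(reg, xxxx, mac, 0x12, 6);
 *
 * describes a 6-byte field (e.g. a MAC address) at offset 0x12 and emits
 * mlxsw_reg_xxxx_mac_memcpy_from() / _memcpy_to() helpers.
 */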

#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,	\
			       _step, _instepoffset)				\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf,			\
						  unsigned short index,		\
						  char *dst)			\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), index);	\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,			\
						unsigned short index,		\
						const char *src)		\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), index);	\
}

#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
			     _element_size)					\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.element_size = _element_size,						\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8								\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)			\
{										\
	return __mlxsw_item_bit_array_get(buf,					\
					  &__ITEM_NAME(_type, _cname, _iname),	\
					  index);				\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)		\
{										\
	__mlxsw_item_bit_array_set(buf,						\
				   &__ITEM_NAME(_type, _cname, _iname),		\
				   index, val);					\
}

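/* A hypothetical bit array declaration such as
 *
 *	MLXSW_ITEM_BIT_ARRAY(reg, xxxx, port_state, 0x20, 0x20, 2);
 *
 * describes 128 two-bit elements in a 0x20-byte array at offset 0x20,
 * accessed with mlxsw_reg_xxxx_port_state_get(buf, index) and
 * mlxsw_reg_xxxx_port_state_set(buf, index, val).
 */
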
#endif