/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H

#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>

struct mlxsw_item {
	unsigned short	offset;		/* bytes in container */
	short		step;		/* step in bytes for indexed items */
	unsigned short	in_step_offset;	/* offset within one step */
	unsigned char	shift;		/* shift in bits */
	unsigned char	element_size;	/* size of element in bit array */
	bool		no_real_shift;
	union {
		unsigned char bits;
		unsigned short bytes;
	} size;
	const char	*name;
};

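/* Return the index of the item's element within buf, measured in units of
 * typesize. Item definitions whose offsets are not naturally aligned to
 * typesize are broken and trigger a BUG.
 */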
static inline unsigned int
__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
		    size_t typesize)
{
	BUG_ON(index && !item->step);
	if (item->offset % typesize != 0 ||
	    item->step % typesize != 0 ||
	    item->in_step_offset % typesize != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
		       item->name, item->offset, item->step,
		       item->in_step_offset, typesize);
		BUG();
	}

	return ((item->offset + item->step * index + item->in_step_offset) /
		typesize);
}

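/* The helpers below read or update a bit field within one element of buf
 * (big-endian for the 16/32/64-bit widths): get shifts the field down and
 * masks it to size.bits; set masks the value and read-modify-writes it into
 * place. With no_real_shift, the field is kept at its original bit position
 * instead of being normalized down to bit 0.
 */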
static inline u8 __mlxsw_item_get8(const char *buf,
				   const struct mlxsw_item *item,
				   unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
	u8 *b = (u8 *) buf;
	u8 tmp;

	tmp = b[offset];
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
				     unsigned short index, u8 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u8));
	u8 *b = (u8 *) buf;
	u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u8 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = b[offset];
	tmp &= ~mask;
	tmp |= val;
	b[offset] = tmp;
}

static inline u16 __mlxsw_item_get16(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 tmp;

	tmp = be16_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u16 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u16 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be16_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be16(tmp);
}

static inline u32 __mlxsw_item_get32(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 tmp;

	tmp = be32_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u32 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u32 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be32_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be32(tmp);
}

static inline u64 __mlxsw_item_get64(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 tmp;

	tmp = be64_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u64 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
	u64 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be64_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be64(tmp);
}

static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
					    const struct mlxsw_item *item,
					    unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(dst, &buf[offset], item->size.bytes);
}

static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
					  const struct mlxsw_item *item,
					  unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(&buf[offset], src, item->size.bytes);
}

static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item,
				      unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	return &buf[offset];
}

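/* Convert a logical bit-array element index into a byte offset within the
 * container and a bit shift within that byte. Elements are packed from the
 * end of the array towards its start, so element 0 occupies the least
 * significant bits of the array's last byte.
 */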
static inline u16
__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
			      u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset;		/* byte offset inside the array */
	u8 in_byte_index;

	BUG_ON(index && !item->element_size);
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	max_index = (item->size.bytes << 3) / item->element_size - 1;
	if (WARN_ONCE(index > max_index,
		      "name=%s,index=%u,max_index=%u\n", item->name, index,
		      max_index))
		index = 0;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}

static inline u8 __mlxsw_item_bit_array_get(const char *buf,
					    const struct mlxsw_item *item,
					    u16 index)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);

	tmp = buf[offset];
	tmp >>= shift;
	tmp &= GENMASK(item->element_size - 1, 0);
	return tmp;
}

static inline void __mlxsw_item_bit_array_set(char *buf,
					      const struct mlxsw_item *item,
					      u16 index, u8 val)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
	u8 mask = GENMASK(item->element_size - 1, 0) << shift;

	val <<= shift;
	val &= mask;
	tmp = buf[offset];
	tmp &= ~mask;
	tmp |= val;
	buf[offset] = tmp;
}

#define __ITEM_NAME(_type, _cname, _iname) \
	mlxsw_##_type##_##_cname##_##_iname##_item

/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */
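
/* Usage sketch (the register and field names below are made up for
 * illustration; real users live in the cmd/reg headers): declaring
 *
 *	MLXSW_ITEM32(reg, foo, bar, 0x08, 24, 4);
 *
 * emits mlxsw_reg_foo_bar_get(buf) and mlxsw_reg_foo_bar_set(buf, val),
 * which access a 4-bit field shifted 24 bits into the big-endian 32-bit
 * word at byte offset 0x08 of the register payload.
 */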

#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \
{ \
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			    _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
				 index); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u8 val) \
{ \
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \
			  index, val); \
}

#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \
{ \
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			     _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u16 val) \
{ \
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}

#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define LOCAL_PORT_LSB_SIZE 8
#define LOCAL_PORT_MSB_SIZE 2

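/* local_port numbers can exceed 8 bits, so registers carry them as an 8-bit
 * LSB field plus a 2-bit MSB field at a second offset. This helper defines
 * both items and emits combined accessors that split and recombine the
 * 10-bit value.
 */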
#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \
	.offset = _offset1, \
	.shift = _shift1, \
	.size = {.bits = LOCAL_PORT_LSB_SIZE,}, \
	.name = #_type "_" #_cname "_local_port", \
}; \
static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \
	.offset = _offset2, \
	.shift = _shift2, \
	.size = {.bits = LOCAL_PORT_MSB_SIZE,}, \
	.name = #_type "_" #_cname "_lp_msb", \
}; \
static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \
{ \
	u32 local_port, lp_msb; \
\
	local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \
					local_port), 0); \
	lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \
				    0); \
	return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \
			   val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \
			   val >> LOCAL_PORT_LSB_SIZE); \
}

#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			     _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u32 val) \
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}
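
/* Indexed items describe per-entry fields in arrays of fixed-size records.
 * A hypothetical example (names invented for illustration):
 *
 *	MLXSW_ITEM32_INDEXED(reg, foo, rec_flags, 0x10, 28, 4, 0x08, 0x00, false);
 *
 * would generate mlxsw_reg_foo_rec_flags_get(buf, index) and
 * mlxsw_reg_foo_rec_flags_set(buf, index, val), where record index N starts
 * 0x08 * N bytes after offset 0x10.
 */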

#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \
{ \
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
			     _sizebits, _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u64 val) \
{ \
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}

#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
	__mlxsw_item_memcpy_from(buf, dst, \
				 &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
	__mlxsw_item_memcpy_to(buf, src, \
			       &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
{ \
	return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
}
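
/* Buffer items cover opaque byte ranges such as MAC addresses or payload
 * blobs; e.g. (hypothetical field) MLXSW_ITEM_BUF(reg, foo, mac, 0x12, 6)
 * would generate mlxsw_reg_foo_mac_memcpy_from(), _memcpy_to() and _data()
 * accessors for the 6 bytes at offset 0x12.
 */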

#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
			       _step, _instepoffset) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
						  unsigned short index, \
						  char *dst) \
{ \
	__mlxsw_item_memcpy_from(buf, dst, \
				 &__ITEM_NAME(_type, _cname, _iname), index); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
						unsigned short index, \
						const char *src) \
{ \
	__mlxsw_item_memcpy_to(buf, src, \
			       &__ITEM_NAME(_type, _cname, _iname), index); \
} \
static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
{ \
	return __mlxsw_item_data(buf, \
				 &__ITEM_NAME(_type, _cname, _iname), index); \
}

#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
			     _element_size) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.element_size = _element_size, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
	return __mlxsw_item_bit_array_get(buf, \
					  &__ITEM_NAME(_type, _cname, _iname), \
					  index); \
} \
static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
	__mlxsw_item_bit_array_set(buf, \
				   &__ITEM_NAME(_type, _cname, _iname), \
				   index, val); \
}

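/* Bit-array items pack several small elements per byte; for example
 * (hypothetical field), MLXSW_ITEM_BIT_ARRAY(reg, foo, prio_state, 0x20, 0x4, 2)
 * would generate mlxsw_reg_foo_prio_state_get(buf, index) and
 * mlxsw_reg_foo_prio_state_set(buf, index, val) for sixteen 2-bit elements
 * stored in the 4 bytes at offset 0x20.
 */
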
#endif