/*
 * drivers/net/ethernet/mellanox/mlxsw/item.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H

#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>

struct mlxsw_item {
	unsigned short offset;		/* bytes in container */
	unsigned short step;		/* step in bytes for indexed items */
	unsigned short in_step_offset;	/* offset within one step */
	unsigned char shift;		/* shift in bits */
	unsigned char element_size;	/* size of element in bit array */
	bool no_real_shift;
	union {
		unsigned char bits;
		unsigned short bytes;
	} size;
	const char *name;
};

static inline unsigned int
__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
		    size_t typesize)
{
	BUG_ON(index && !item->step);
	if (item->offset % typesize != 0 ||
	    item->step % typesize != 0 ||
	    item->in_step_offset % typesize != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
		       item->name, item->offset, item->step,
		       item->in_step_offset, typesize);
		BUG();
	}

	return ((item->offset + item->step * index + item->in_step_offset) /
		typesize);
}

static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 tmp;

	tmp = be16_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
				      unsigned short index, u16 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u16 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be16_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be16(tmp);
}

static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 tmp;

	tmp = be32_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
				      unsigned short index, u32 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u32 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be32_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be32(tmp);
}

static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 tmp;

	tmp = be64_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
				      unsigned short index, u64 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
	u64 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be64_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be64(tmp);
}
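/* The 16/32/64-bit helpers above all follow the same pattern: locate the
 * big-endian word that holds the item, byte-swap it to CPU order, then shift
 * and mask out the requested field. As a worked example (the numbers are
 * illustrative only and do not describe any real register): an item with
 * .offset = 4, .shift = 8 and .size.bits = 4 lives in the 32-bit word at
 * bytes 4..7; the getter shifts that word right by 8 and masks it with
 * GENMASK(3, 0), i.e. the field occupies bits 11:8 of the word. When
 * .no_real_shift is set, the value is kept in its shifted position instead
 * of being normalized down to bit 0, and the setter likewise expects the
 * caller to pass an already-shifted value.
 */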
static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
					    struct mlxsw_item *item)
{
	memcpy(dst, &buf[item->offset], item->size.bytes);
}

static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
					  struct mlxsw_item *item)
{
	memcpy(&buf[item->offset], src, item->size.bytes);
}

static inline u16
__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset;		/* byte offset inside the array */
	u8 in_byte_index;

	BUG_ON(index && !item->element_size);
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	max_index = (item->size.bytes << 3) / item->element_size - 1;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}

static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
					    u16 index)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);

	tmp = buf[offset];
	tmp >>= shift;
	tmp &= GENMASK(item->element_size - 1, 0);
	return tmp;
}

static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
					      u16 index, u8 val)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
	u8 mask = GENMASK(item->element_size - 1, 0) << shift;

	val <<= shift;
	val &= mask;
	tmp = buf[offset];
	tmp &= ~mask;
	tmp |= val;
	buf[offset] = tmp;
}

#define __ITEM_NAME(_type, _cname, _iname) \
	mlxsw_##_type##_##_cname##_##_iname##_item

/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */
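/* Usage sketch (the register and field names below are hypothetical and for
 * illustration only): an 8-bit field at bit offset 16 of the 32-bit word at
 * byte 0x00 of a register payload could be declared as
 *
 *	MLXSW_ITEM32(reg, example, field, 0x00, 16, 8);
 *
 * which generates mlxsw_reg_example_field_get(buf) and
 * mlxsw_reg_example_field_set(buf, val). The *_INDEXED variants additionally
 * take a step size in bytes, an offset within one step and a no-real-shift
 * flag, and generate accessors that take an index argument for arrays of
 * identical records inside the container.
 */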
#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
{ \
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
{ \
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			     _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 \
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u16 val) \
{ \
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}

#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
{ \
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			     _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 \
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u32 val) \
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}

#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
{ \
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
{ \
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
			     _sizebits, _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 \
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
{ \
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u64 val) \
{ \
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}
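/* Besides fixed-width integer items, a container can also expose raw byte
 * buffers and packed bit arrays via the two macros below. As a hypothetical
 * illustration (names and offsets are made up, not taken from any real
 * register): MLXSW_ITEM_BUF(reg, example, mac, 0x10, 6) would generate
 * mlxsw_reg_example_mac_memcpy_from()/_memcpy_to() for a 6-byte field, and
 * MLXSW_ITEM_BIT_ARRAY(reg, example, prio_state, 0x20, 0x4, 2) would
 * generate indexed get/set accessors for sixteen 2-bit elements stored in a
 * 4-byte array.
 */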
#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
{ \
	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \
{ \
	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \
}

#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
			     _element_size) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.element_size = _element_size, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 \
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \
{ \
	return __mlxsw_item_bit_array_get(buf, \
					  &__ITEM_NAME(_type, _cname, _iname), \
					  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
	__mlxsw_item_bit_array_set(buf, \
				   &__ITEM_NAME(_type, _cname, _iname), \
				   index, val); \
}

#endif