// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "core_env.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"

/* List of all registered mlxsw drivers, protected by
 * mlxsw_core_driver_list_lock.
 */
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

/* Ordered (mlxsw_wq) and regular (mlxsw_owq is the ordered one) core
 * workqueues; created/used elsewhere in this file.
 * NOTE(review): naming suggests mlxsw_owq is the ordered workqueue —
 * confirm against its allocation site, which is outside this chunk.
 */
static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

/* Per-port core state, one entry per local port in mlxsw_core->ports. */
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;	/* set by the port driver; NULL == port unused */
	u16 local_port;
};

/* Return the port driver's private data attached to a core port. */
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

/* A core port is considered in use iff a driver attached private data. */
static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

/* Main per-device core state. Allocated with trailing driver_priv[]
 * storage for the bus/driver-specific private area.
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;		/* next EMAD transaction ID */
		struct list_head trans_list;	/* pending EMAD transactions */
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
		bool enable_string_tlv;
	} emad;
	struct {
		u16 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	atomic_t active_ports_count;
	bool fw_flash_in_progress;	/* extends EMAD timeouts while flashing */
	struct {
		struct devlink_health_reporter *fw_fatal;
	} health;
	struct mlxsw_env *env;
	unsigned long driver_priv[];
	/* driver_priv has to be always the last item */
};

/* Fallback port count when MAX_SYSTEM_PORT resource is not exposed. */
#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

/* devlink occupancy getter for the "ports" resource. */
static u64 mlxsw_ports_occ_get(void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	return atomic_read(&mlxsw_core->active_ports_count);
}

/* Register the generic "ports" devlink resource sized to the number of
 * usable switch ports (max_ports minus the reserved port 0).
 */
static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params ports_num_params;
	u32 max_ports;

	max_ports = mlxsw_core->max_ports - 1;
	devlink_resource_size_params_init(&ports_num_params, max_ports,
					  max_ports, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink,
					 DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
					 max_ports, MLXSW_CORE_RESOURCE_PORTS,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &ports_num_params);
}

/* Allocate the per-port array and hook up the devlink ports resource.
 * On reload the resource itself is already registered and is kept.
 */
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	int err;

	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	if (!reload) {
		err = mlxsw_core_resources_ports_register(mlxsw_core);
		if (err)
			goto err_resources_ports_register;
	}
	atomic_set(&mlxsw_core->active_ports_count, 0);
	devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
					  mlxsw_ports_occ_get, mlxsw_core);

	return 0;

err_resources_ports_register:
	kfree(mlxsw_core->ports);
	return err;
}

/* Tear down what mlxsw_ports_init() set up; on reload the devlink
 * resources are kept registered.
 */
static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
	if (!reload)
		devlink_resources_unregister(priv_to_devlink(mlxsw_core));

	kfree(mlxsw_core->ports);
}

/* Number of local ports, including the reserved port 0. */
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

/* Driver-private area allocated at the tail of struct mlxsw_core. */
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

/* True when 'rev' satisfies the minimum minor.subminor required by
 * 'req_rev' (major version is compared by the caller).
 */
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
					  const struct mlxsw_fw_rev *req_rev)
{
	return rev->minor > req_rev->minor ||
	       (rev->minor == req_rev->minor &&
		rev->subminor >= req_rev->subminor);
}
EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);

/* Node on mlxsw_core->rx_listener_list. */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
	bool enabled;
};

/* Node on mlxsw_core->event_listener_list. */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_event_listener el;
	void *priv;
};

/* Trap groups the core itself needs (EMAD responses and core events). */
static const u8 mlxsw_core_trap_groups[] = {
	MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
	MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
};

/* Configure the core trap groups via the HTGT register. */
static int mlxsw_core_trap_groups_set(struct
				      mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int err;
	int i;

	/* Trap groups are only relevant when the bus can do TX/RX. */
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
		mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
				    MLXSW_REG_HTGT_INVALID_POLICER,
				    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
				    MLXSW_REG_HTGT_DEFAULT_TC);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}
	return 0;
}

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_string_tlv_type
 * Type of the TLV.
 * Must be set to 0x2 (string TLV).
 */
MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);

/* emad_string_tlv_len
 * Length of the string TLV in u32.
 */
MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);

#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128

/* emad_string_tlv_string
 * String provided by the device's firmware in case of erroneous register access
 */
MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
	       MLXSW_EMAD_STRING_TLV_STRING_LEN);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

/* Direction of a register access carried in an EMAD. */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

/* Human-readable name of an access type, for debug prints. */
static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	/* All enum values are handled above; reaching here is a bug. */
	BUG();
}

/* Fill in the terminating end TLV. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

/* Fill in the register TLV: header word followed by the register payload.
 * Length is in u32 units; the +1 accounts for the TLV header word.
 */
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

/* Fill in an (empty) string TLV header; firmware fills the string on error. */
static void mlxsw_emad_pack_string_tlv(char *string_tlv)
{
	mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
	mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
}

/* Fill in the operation TLV for a request with the given register,
 * access type and transaction ID.
 */
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

/* Prepend the fixed EMAD Ethernet header to the skb. */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

/* Build the full EMAD frame back-to-front with skb_push():
 * end TLV, register TLV, optional string TLV, op TLV, Ethernet header.
 */
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid, bool enable_string_tlv)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	if (enable_string_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_string_tlv(buf);
	}

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

/* Offsets of the TLVs inside a received EMAD, cached in skb->cb. */
struct mlxsw_emad_tlv_offsets {
	u16 op_tlv;
	u16 string_tlv;	/* 0 when no string TLV is present */
	u16 reg_tlv;
};

/* True if the TLV at 'tlv' is a string TLV. */
static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
{
	u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);

	return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
}

/* Locate the TLVs in a received EMAD and cache their offsets in skb->cb. */
static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
	offsets->string_tlv = 0;
	offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
			   MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);

	/* If string TLV is present, it must come after the operation TLV. */
	if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
		offsets->string_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	}
}

/* Pointer to the op TLV of a parsed EMAD (mlxsw_emad_tlv_parse() ran). */
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->op_tlv));
}

/* Pointer to the string TLV of a parsed EMAD, or NULL if absent. */
static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	if (!offsets->string_tlv)
		return NULL;

	return ((char *) (skb->data + offsets->string_tlv));
}

/* Pointer to the register TLV of a parsed EMAD. */
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->reg_tlv));
}

/* Register payload starts one header word past the register TLV. */
static char *mlxsw_emad_reg_payload(const char *reg_tlv)
{
	return ((char *) (reg_tlv + sizeof(u32)));
}

/* Register payload offset inside a command-interface mailbox. */
static char
*mlxsw_emad_reg_payload_cmd(const char *mbox)
{
	return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

/* Transaction ID of a parsed EMAD. */
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

/* True if a parsed EMAD is a response (r bit set). */
static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

/* Map the EMAD op TLV status to an errno; stores the raw status in
 * *p_status. BUSY/receipt-ack map to -EAGAIN (caller should retry),
 * everything else non-zero maps to -EIO.
 */
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

/* Convenience wrapper of mlxsw_emad_process_status() on a parsed skb. */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

/* State of one in-flight EMAD register-access transaction. Linked on
 * mlxsw_core->emad.trans_list (RCU) and on the caller's bulk_list.
 */
struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;	/* 1 while a transmit is outstanding */
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	char *emad_err_string;	/* firmware error string, kzalloc'ed */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

/* Copy the firmware-provided error string (if any) out of a response
 * into trans->emad_err_string. Allocation failure is silently ignored;
 * the string is diagnostic only.
 */
static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
					  struct mlxsw_reg_trans *trans)
{
	char *string_tlv;
	char *string;

	string_tlv = mlxsw_emad_string_tlv(skb);
	if (!string_tlv)
		return;

	trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
					 GFP_ATOMIC);
	if (!trans->emad_err_string)
		return;

	string = mlxsw_emad_string_tlv_string_data(string_tlv);
	strlcpy(trans->emad_err_string, string,
		MLXSW_EMAD_STRING_TLV_STRING_LEN);
}

#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
#define MLXSW_EMAD_TIMEOUT_MS			200

/* Arm the response-timeout work; the timeout doubles with each retry
 * (timeout << retries) and is extended while firmware is being flashed.
 */
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	if (trans->core->fw_flash_in_progress)
		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
			   timeout << trans->retries);
}

/* Clone and transmit the transaction's skb, then arm the timeout.
 * The clone keeps trans->tx_skb available for retransmission.
 */
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_clone(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

/* Complete a transaction: free the request skb, unlink it from the
 * pending list and wake the waiter with 'err'.
 */
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

/* Retransmit a transaction if retries remain, otherwise finish it with
 * -EIO. If the retransmit itself fails and the transaction is still
 * active elsewhere, leave completion to that path.
 */
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

/* Response-timeout worker; only acts if it wins the race against the
 * response path (atomic 'active' decides the owner).
 */
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

/* Handle a matched EMAD response: decode status, invoke the caller's
 * callback with the register payload on success, capture the firmware
 * error string on failure, retry on -EAGAIN.
 */
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *reg_tlv = mlxsw_emad_reg_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(reg_tlv),
					  trans->reg->len, trans->cb_priv);
		} else {
			mlxsw_emad_process_string_tlv(skb, trans);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	/* Match the response to a pending transaction by TID. */
	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);

/* Set up EMAD machinery: dedicated workqueue, random upper TID bits,
 * pending-transaction list and the EMAD RX trap listener.
 */
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

/* Tear down what mlxsw_emad_init() set up. */
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}

/* Allocate a zeroed skb big enough for the full EMAD frame (headers,
 * TLVs and optional string TLV); NULL if the frame would exceed the
 * maximum EMAD frame length or allocation fails.
 */
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len, bool enable_string_tlv)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) +
		    MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (enable_string_tlv)
		emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	/* Zero the buffer and reserve the full length; the frame is then
	 * built back-to-front with skb_push().
	 */
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

/* Build and transmit one EMAD register-access transaction. On success
 * the transaction is linked on 'bulk_list' and the pending list; the
 * caller waits for completion. 'cb' (if set) runs on the response
 * payload. On failure everything is unwound and an errno returned.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	bool enable_string_tlv;
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* Since this can be changed during emad_reg_access, read it once and
	 * use the value all the way.
	 */
	enable_string_tlv = mlxsw_core->emad.enable_string_tlv;

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
			     enable_string_tlv);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

/* Add a driver to the global registry. */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

/* Remove a driver from the global registry. */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

/* Look up a registered driver by kind; caller holds the list lock. */
static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

/* Locked wrapper around __driver_find(). */
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

/* Glue between the mlxfw flashing library and a core instance; mlxfw
 * callbacks recover the core via container_of() on mlxfw_dev.
 */
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};

/* Query FW component attributes (max size, alignment, max write size)
 * through the MCQI register.
 */
static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);

	/* Enforce a minimum 4-byte alignment and cap writes to what the
	 * MCDA data register can carry per transaction.
	 */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* Acquire the FW update handle: the FSM must be idle, then a
 * LOCK_UPDATE_HANDLE instruction is written via MCC.
 */
static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* Announce a component update of @component_size bytes via MCC. */
static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index, u32 component_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* Download one data block of the component at @offset via MCDA. */
static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					    u8 *data, u16 size, u32 offset)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
}

/* Ask FW to verify the downloaded component via MCC. */
static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* Activate the newly flashed image via MCC. */
static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* Read back the FSM state and error code through MCC. The error code is
 * clamped to MLXFW_FSM_STATE_ERR_MAX so unknown FW codes stay in range.
 */
static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					 enum mlxfw_fsm_state *fsm_state,
					 enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* Cancel an in-progress update; best effort, write result ignored. */
static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* Release the FW update handle; best effort, write result ignored. */
static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callbacks implemented on top of the MCQI/MCC/MCDA registers. */
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};

/* Flash @firmware through mlxfw. fw_flash_in_progress is raised for the
 * duration; presumably other core paths consult it - confirm at callers.
 */
static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_core_fw_info mlxsw_core_fw_info = {
		.mlxfw_dev = {
			.ops = &mlxsw_core_fw_mlxsw_dev_ops,
			.psid = mlxsw_core->bus_info->psid,
			.psid_size = strlen(mlxsw_core->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_core),
		},
		.mlxsw_core = mlxsw_core
	};
	int err;

	mlxsw_core->fw_flash_in_progress = true;
	err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
	mlxsw_core->fw_flash_in_progress = false;

	return err;
}

/* Validate that the running FW is compatible with the driver and, if it
 * is too old, flash @filename. Returns 0 when nothing needs doing,
 * -EAGAIN when a flash succeeded and the caller should reset the FW,
 * or a negative errno on failure.
 */
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_bus_info *mlxsw_bus_info,
				      const struct mlxsw_fw_rev *req_rev,
				      const char *filename)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err =
devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
					 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);

	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
	if (err) {
		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
		return err;
	}

	err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update entry point: hand the file to the mlxfw path. */
static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
}

/* Only 'driver' and 'flash' values are accepted for fw_load_policy. */
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
							    union devlink_param_value val,
							    struct netlink_ext_ack *extack)
{
	if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
	    val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};

/* Register the core's FW devlink params and set the default policy. */
static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
				      ARRAY_SIZE(mlxsw_core_fw_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
	return 0;
}

/* Undo mlxsw_core_fw_params_register(). */
static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
				  ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}

/* devlink port split: bounds-check the index, then defer to the driver. */
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

/* devlink port unsplit: bounds-check the index, then defer to the driver. */
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}

/* Shared-buffer pool query, dispatched to the driver if implemented. */
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

/* Shared-buffer pool configuration, dispatched to the driver. */
static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type,
					 extack);
}

/* Map a devlink_port back to its enclosing mlxsw_core_port. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

/* devlink port type change, dispatched to the driver if implemented. */
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

/* Per-port shared-buffer threshold query. The mlxsw_core_port_check()
 * guard skips ports with no driver private data attached.
 */
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

/* Per-port shared-buffer threshold configuration. */
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold, extack);
}

/* Query the pool binding of a port TC. */
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

/* Bind a port TC to a pool with a threshold. */
static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold, extack);
}

/* Take a snapshot of shared-buffer occupancy. */
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

/* Clear the maximum-occupancy watermarks. */
static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

/* Read current/max occupancy of a port pool. */
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

/* Read current/max occupancy of a port TC binding. */
static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

/* devlink info: report driver name, HW revision and PSID (fixed) and
 * the running FW version, all read from the MGIR register.
 */
static int
mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
	u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
	char mgir_pl[MLXSW_REG_MGIR_LEN];
	char buf[32];
	int err;

	err = devlink_info_driver_name_put(req,
					   mlxsw_core->bus_info->device_kind);
	if (err)
		return err;

	mlxsw_reg_mgir_pack(mgir_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
	if (err)
		return err;
	mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
			      &fw_minor, &fw_sub_minor);

	sprintf(buf, "%X", hw_rev);
	err = devlink_info_version_fixed_put(req, "hw.revision", buf);
	if (err)
		return err;

	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
					     fw_info_psid);
	if (err)
		return err;

	sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
	err = devlink_info_version_running_put(req, "fw.version", buf);
	if (err)
		return err;

	/* Report the same version under the generic "fw" name as well. */
	return devlink_info_version_running_put(req,
						DEVLINK_INFO_VERSION_GENERIC_FW,
						buf);
}

/* devlink reload_down: tear the device down; only supported when the
 * bus can reset the device.
 */
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
					  bool netns_change, enum devlink_reload_action action,
					  enum devlink_reload_limit limit,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	return 0;
}

/* devlink reload_up: re-register the device on the same bus/devlink. */
static int
mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
					enum devlink_reload_limit limit, u32 *actions_performed,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			     BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
	return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					      mlxsw_core->bus,
					      mlxsw_core->bus_priv, true,
					      devlink, extack);
}

/* devlink flash_update: forward to the core FW flashing path. */
static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
}

/* The trap/policer callbacks below all follow the same shape: dispatch
 * to the driver when it implements the hook, -EOPNOTSUPP otherwise.
 */
static int mlxsw_devlink_trap_init(struct devlink *devlink,
				   const struct devlink_trap *trap,
				   void *trap_ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_init)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
}

static void mlxsw_devlink_trap_fini(struct devlink *devlink,
				    const struct devlink_trap *trap,
				    void *trap_ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_fini)
		return;
	mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
}

static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
					 const struct devlink_trap *trap,
					 enum devlink_trap_action action,
					 struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_action_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
}

static int
mlxsw_devlink_trap_group_init(struct devlink *devlink,
			      const struct devlink_trap_group *group)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_group_init)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_group_init(mlxsw_core, group);
}

static int
mlxsw_devlink_trap_group_set(struct devlink *devlink,
			     const struct devlink_trap_group *group,
			     const struct devlink_trap_policer *policer,
			     struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_group_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
}

static int
mlxsw_devlink_trap_policer_init(struct devlink *devlink,
				const struct devlink_trap_policer *policer)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_init)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
}

static void
mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
				const struct devlink_trap_policer *policer)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_fini)
		return;
	mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
}

static int
mlxsw_devlink_trap_policer_set(struct devlink *devlink,
			       const struct devlink_trap_policer *policer,
			       u64 rate, u64 burst,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
					      extack);
}

static int
mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
				       const struct devlink_trap_policer *policer,
				       u64 *p_drops)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_counter_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
						      p_drops);
}

/* devlink operations table for all mlxsw devices. */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions		= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down		= mlxsw_devlink_core_bus_device_reload_down,
	.reload_up		= mlxsw_devlink_core_bus_device_reload_up,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get			= mlxsw_devlink_info_get,
	.flash_update			= mlxsw_devlink_flash_update,
	.trap_init			= mlxsw_devlink_trap_init,
	.trap_fini			= mlxsw_devlink_trap_fini,
	.trap_action_set		= mlxsw_devlink_trap_action_set,
	.trap_group_init		= mlxsw_devlink_trap_group_init,
	.trap_group_set			= mlxsw_devlink_trap_group_set,
	.trap_policer_init		= mlxsw_devlink_trap_policer_init,
	.trap_policer_fini		= mlxsw_devlink_trap_policer_fini,
	.trap_policer_set		= mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_devlink_trap_policer_counter_get,
};

/* Register the core FW params, then the driver's own params; roll back
 * the former if the latter fails.
 */
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_core_fw_params_register(mlxsw_core);
	if (err)
		return err;

	if (mlxsw_core->driver->params_register) {
		err = mlxsw_core->driver->params_register(mlxsw_core);
		if (err)
			goto err_params_register;
	}
	return 0;

err_params_register:
	mlxsw_core_fw_params_unregister(mlxsw_core);
	return err;
}

/* Undo mlxsw_core_params_register(). */
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
mlxsw_core_fw_params_unregister(mlxsw_core); 1680 if (mlxsw_core->driver->params_register) 1681 mlxsw_core->driver->params_unregister(mlxsw_core); 1682 } 1683 1684 struct mlxsw_core_health_event { 1685 struct mlxsw_core *mlxsw_core; 1686 char mfde_pl[MLXSW_REG_MFDE_LEN]; 1687 struct work_struct work; 1688 }; 1689 1690 static void mlxsw_core_health_event_work(struct work_struct *work) 1691 { 1692 struct mlxsw_core_health_event *event; 1693 struct mlxsw_core *mlxsw_core; 1694 1695 event = container_of(work, struct mlxsw_core_health_event, work); 1696 mlxsw_core = event->mlxsw_core; 1697 devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred", 1698 event->mfde_pl); 1699 kfree(event); 1700 } 1701 1702 static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg, 1703 char *mfde_pl, void *priv) 1704 { 1705 struct mlxsw_core_health_event *event; 1706 struct mlxsw_core *mlxsw_core = priv; 1707 1708 event = kmalloc(sizeof(*event), GFP_ATOMIC); 1709 if (!event) 1710 return; 1711 event->mlxsw_core = mlxsw_core; 1712 memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl)); 1713 INIT_WORK(&event->work, mlxsw_core_health_event_work); 1714 mlxsw_core_schedule_work(&event->work); 1715 } 1716 1717 static const struct mlxsw_listener mlxsw_core_health_listener = 1718 MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE); 1719 1720 static int 1721 mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl, 1722 struct devlink_fmsg *fmsg) 1723 { 1724 u32 val, tile_v; 1725 int err; 1726 1727 val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl); 1728 err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); 1729 if (err) 1730 return err; 1731 tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl); 1732 if (tile_v) { 1733 val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl); 1734 err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); 1735 if (err) 1736 return err; 1737 } 1738 1739 return 0; 1740 } 1741 1742 static int 1743 
mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
					  struct devlink_fmsg *fmsg)
{
	u32 val, tile_v;
	int err;

	/* Dump the MFDE "FW assert" fields into the health fmsg. */
	val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
	err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
	if (err)
		return err;
	/* tile_index is only meaningful when the tile-valid bit is set. */
	tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
	if (tile_v) {
		val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
		err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
		if (err)
			return err;
	}
	val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
	if (err)
		return err;

	return 0;
}

/* Dump the MFDE "KVD insertion machine stopped" fields. */
static int
mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
					    struct devlink_fmsg *fmsg)
{
	u32 val;
	int err;

	val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
	err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
	return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
}

/* Dump the MFDE "CR space timeout" fields. */
static int
mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
					   struct devlink_fmsg *fmsg)
{
	u32 val;
	int err;

	val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
	err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
	err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
	if (err)
		return err;
	val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
	err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
	if (err)
		return err;

	return 0;
}

/* devlink health "dump" callback for the fw_fatal reporter: decode the
 * MFDE payload captured at event time into a structured fmsg. Common
 * fields first, then event-specific fields via the helpers above.
 */
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
					   struct devlink_fmsg *fmsg, void *priv_ctx,
					   struct netlink_ext_ack *extack)
{
	char *mfde_pl = priv_ctx;
	char *val_str;
	u8 event_id;
	u32 val;
	int err;

	if (!priv_ctx)
		/* User-triggered dumps are not possible */
		return -EOPNOTSUPP;

	val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
	if (err)
		return err;

	event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
	if (err)
		return err;
	/* Attach a human-readable description for known event IDs. */
	switch (event_id) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		val_str = "CR space timeout";
		break;
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		val_str = "KVD insertion machine stopped";
		break;
	case MLXSW_REG_MFDE_EVENT_ID_TEST:
		val_str = "Test";
		break;
	case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
		val_str = "FW assert";
		break;
	case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
		val_str = "Fatal cause";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
		if (err)
			return err;
	}

	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
	if (err)
		return err;

	val = mlxsw_reg_mfde_severity_get(mfde_pl);
	err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
	if (err)
		return err;
	switch (val) {
	case MLXSW_REG_MFDE_SEVERITY_FATL:
		val_str = "Fatal";
		break;
	case MLXSW_REG_MFDE_SEVERITY_NRML:
		val_str = "Normal";
		break;
	case MLXSW_REG_MFDE_SEVERITY_INTR:
		val_str = "Debug";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
		if (err)
			return err;
	}

	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;

	/* Access method that triggered the event, when recognized. */
	val = mlxsw_reg_mfde_method_get(mfde_pl);
	switch (val) {
	case MLXSW_REG_MFDE_METHOD_QUERY:
		val_str = "query";
		break;
	case MLXSW_REG_MFDE_METHOD_WRITE:
		val_str = "write";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
		if (err)
			return err;
	}

	val = mlxsw_reg_mfde_long_process_get(mfde_pl);
	err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
	if (err)
		return err;

	/* Command interface the access arrived on, when recognized. */
	val = mlxsw_reg_mfde_command_type_get(mfde_pl);
	switch (val) {
	case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
		val_str = "mad";
		break;
	case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
		val_str = "emad";
		break;
	case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
		val_str = "cmdif";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
		if (err)
			return err;
	}

	val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
	if (err)
		return err;

	/* Event-specific payload; unknown events dump common fields only. */
	switch (event_id) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
								  fmsg);
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
								   fmsg);
	case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
		return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
	case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
		return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
								   fmsg);
	}

	return 0;
}

/* devlink health "test" callback: inject a test FW fatal event via the
 * MFGD register (continues past this view).
 */
static int
mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int err;

	/* Read the register first to make sure no other bits are changed.
	 */
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (err)
		return err;
	mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}

static const struct devlink_health_reporter_ops
mlxsw_core_health_fw_fatal_ops = {
	.name = "fw_fatal",
	.dump = mlxsw_core_health_fw_fatal_dump,
	.test = mlxsw_core_health_fw_fatal_test,
};

/* Enable or disable FW fatal event reporting via the MFGD register. */
static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
					     bool enable)
{
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int err;

	/* Read the register first to make sure no other bits are changed. */
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (err)
		return err;
	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}

/* Create the "fw_fatal" health reporter, register the health trap
 * listener and enable fatal event reporting in FW. Skipped for buses
 * without TX/RX capability.
 */
static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_health_reporter *fw_fatal;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
						  0, mlxsw_core);
	if (IS_ERR(fw_fatal)) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
		return PTR_ERR(fw_fatal);
	}
	mlxsw_core->health.fw_fatal = fw_fatal;

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
	if (err)
		goto err_fw_fatal_config;

	return 0;

err_fw_fatal_config:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
err_trap_register:
	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
	return err;
}

/* Tear down FW fatal health reporting: disable the event in FW, remove
 * the trap listener and destroy the reporter.
 */
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
	/* Make sure there is no more event work scheduled */
	mlxsw_core_flush_owq();
	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}

/* Common device registration path, used both for the initial probe and
 * for devlink reload. When reload is true, the devlink instance and its
 * registered resources are kept and only the layers below them are
 * re-initialized.
 */
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		/* Driver private data lives in the flexible array member at
		 * the end of struct mlxsw_core.
		 */
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
					mlxsw_bus_info->dev);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core, reload);
	if (err)
		goto err_ports_init;
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		/* One u16 local port entry per (lag_id, port_index) pair. */
		alloc_size = sizeof(*mlxsw_core->lag.mapping) *
			     MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			     MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_core_trap_groups_set(mlxsw_core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = mlxsw_core_params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
					 mlxsw_driver->fw_filename);
	if (err)
		goto err_fw_rev_validate;

	err = mlxsw_core_health_init(mlxsw_core);
	if (err)
		goto err_health_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
	if (err)
		goto err_env_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
		if (err)
			goto err_driver_init;
	}

	if (!reload) {
		devlink_set_features(devlink, DEVLINK_F_RELOAD);
		devlink_register(devlink);
	}
	return 0;

err_driver_init:
	mlxsw_env_fini(mlxsw_core->env);
err_env_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	mlxsw_core_health_fini(mlxsw_core);
err_health_init:
err_fw_rev_validate:
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
err_trap_groups_set:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}

/* Register a bus device with the core. A -EAGAIN from the inner helper
 * means FW was just updated and the device needs a reset, so the
 * registration is retried exactly once.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink,
				   struct netlink_ext_ack *extack)
{
	bool called_again = false;
	int err;

again:
	err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
					       bus_priv, reload,
					       devlink, extack);
	/* -EAGAIN is returned in case the FW was updated. FW needs
	 * a reset, so lets try to call __mlxsw_core_bus_device_register()
	 * again.
	 */
	if (err == -EAGAIN && !called_again) {
		called_again = true;
		goto again;
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

/* Unregister a bus device, either on removal or as the down phase of a
 * devlink reload.
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (!reload)
		devlink_unregister(devlink);

	if (devlink_is_reload_failed(devlink)) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	/* De-initialize in reverse order of __mlxsw_core_bus_device_register(). */
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_env_fini(mlxsw_core->env);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	mlxsw_core_health_fini(mlxsw_core);
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core, reload);
	if (!reload)
		devlink_resources_unregister(devlink);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (!reload)
		devlink_free(devlink);

	return;

reload_fail_deinit:
	mlxsw_core_params_unregister(mlxsw_core);
	devlink_resources_unregister(devlink);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

/* Ask the bus whether it can currently accept a packet for transmission. */
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

/* Hand a packet to the underlying bus for transmission. */
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

/* Notify the driver that a PTP packet was transmitted, if it implements
 * the callback.
 */
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				struct sk_buff *skb, u16 local_port)
{
	if (mlxsw_core->driver->ptp_transmitted)
		mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
						    local_port);
}
EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);

/* Two RX listeners are considered equal iff all their match fields agree. */
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id &&
		rxl_a->mirror_reason == rxl_b->mirror_reason);
}

/* Find a registered RX listener item matching rxl, or NULL. */
static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
			return rxl_item;
	}
	return NULL;
}

/* Register an RX listener. Returns -EEXIST if an identical listener is
 * already registered. The item is published with RCU so the RX path can
 * walk the list locklessly.
 */
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv, bool enabled)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;
	rxl_item->enabled = enabled;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

/* Unregister an RX listener; a grace period elapses before the item is
 * freed so concurrent RX walkers stay safe.
 */
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

/* Flip the enabled state of an already registered RX listener. */
static void
mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_rx_listener *rxl,
				 bool enabled)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (WARN_ON(!rxl_item))
		return;
	rxl_item->enabled = enabled;
}

/* RX handler used for event listeners: parse the EMAD TLVs from the skb
 * and invoke the event callback with the register info and payload.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_reg_info reg;
	char *payload;
	char *reg_tlv;
	char *op_tlv;

	mlxsw_core = event_listener_item->mlxsw_core;
	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);
	op_tlv = mlxsw_emad_op_tlv(skb);
	reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* reg TLV length is in u32 units; the first u32 is excluded from
	 * the payload size - presumably the TLV header, confirm in emad.h.
	 */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(reg_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

/* Two event listeners are considered equal iff callback and trap ID agree. */
static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

/* Find a registered event listener item matching el, or NULL. */
static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el))
			return el_item;
	}
	return NULL;
}

/* Register an event listener by wrapping it in an RX listener on the
 * event's trap ID. Returns -EEXIST if already registered.
 */
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->mlxsw_core = mlxsw_core;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

/* Unregister an event listener and its underlying RX listener. */
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

/* Dispatch registration to the event or RX listener path depending on
 * the listener type. Event listeners are always registered enabled.
 */
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv, bool enabled)
{
	if (listener->is_event) {
		WARN_ON(!enabled);
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->event_listener,
						priv);
	} else {
		return mlxsw_core_rx_listener_register(mlxsw_core,
						       &listener->rx_listener,
						       priv, enabled);
	}
}

/* Mirror of mlxsw_core_listener_register() for unregistration. */
static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						     &listener->event_listener);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->rx_listener);
}

/* Register a listener and program the matching trap policy (HPKT). */
int mlxsw_core_trap_register(struct mlxsw_core
			     *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
					   listener->enabled_on_register);
	if (err)
		return err;

	/* Program the trap action/group matching the initial enabled state. */
	action = listener->enabled_on_register ? listener->en_action :
						 listener->dis_action;
	trap_group = listener->enabled_on_register ? listener->en_trap_group :
						     listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

/* Unregister a listener; for non-event listeners first restore the
 * disabled trap action in HW (best effort - the write result is ignored).
 */
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
				    listener->trap_id, listener->dis_trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

/* Register an array of listeners, unwinding already registered ones on
 * the first failure.
 */
int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_listener *listeners,
			      size_t listeners_count, void *priv)
{
	int i, err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_core,
					       &listeners[i],
					       priv);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_core,
					   &listeners[i],
					   priv);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_core_traps_register);

/* Unregister an array of listeners previously registered together. */
void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_listener *listeners,
				 size_t listeners_count, void *priv)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_core,
					   &listeners[i],
					   priv);
	}
}
EXPORT_SYMBOL(mlxsw_core_traps_unregister);

/* Enable/disable a registered trap: reprogram HPKT and update the SW
 * state of the underlying RX listener. Not valid for event listeners.
 */
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_listener *listener,
			      bool enabled)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	/* Not supported for event listener */
	if (WARN_ON(listener->is_event))
		return -EINVAL;

	action = enabled ? listener->en_action : listener->dis_action;
	trap_group = enabled ? listener->en_trap_group :
			       listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		return err;

	mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
					 enabled);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_trap_state_set);

/* Allocate the next EMAD transaction ID. */
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

/* Issue an asynchronous EMAD register access. Completion is tracked via
 * the transaction added to bulk_list; cb, if set, runs on success.
 */
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		/* RCU-deferred free: the RX path may still look at trans. */
		kfree_rcu(trans, rcu);
		return err;
	}
	return 0;
}

/* Queue an asynchronous register query on bulk_list. */
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

/* Queue an asynchronous register write on bulk_list. */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

#define MLXSW_REG_TRANS_ERR_STRING_SIZE	256

/* Wait for a single queued EMAD transaction to complete, log failures,
 * emit a devlink hwerr trace on error and free the transaction. Returns
 * the transaction's error status.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

		snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
			 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
			 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
			 mlxsw_emad_op_tlv_status_str(trans->emad_status),
			 trans->emad_err_string ? trans->emad_err_string : "");

		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
				    trans->emad_status, err_string);

		kfree(trans->emad_err_string);
	}

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

/* Wait for all transactions on bulk_list; return the first error seen. */
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);

/* Synchronous register access over the command interface (mailboxes);
 * the fallback path used when EMAD is not available.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	/* The in-mailbox carries an EMAD-formatted request: op TLV followed
	 * by the register TLV with the payload.
	 */
	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			/* -EAGAIN means FW asked us to retry; bounded by
			 * MLXSW_EMAD_MAX_RETRY.
			 */
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

/* Bulk-transaction callback that copies the response back into the
 * caller's payload buffer; used by the synchronous access path.
 */
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

/* Synchronous register access; uses EMAD when enabled, otherwise falls
 * back to the command interface.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

/* Synchronously query a register. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

/* Synchronously write a register. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

/* RX entry point from the bus: resolve the local port (through the LAG
 * mapping when the packet came via a LAG), find a matching enabled
 * listener under RCU and hand the skb to it. Unmatched or out-of-range
 * packets are dropped.
 */
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u16 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		/* NOTE(review): the format labels the second value
		 * "lag_port_index" but trap_id is passed - looks like a
		 * copy-paste slip in the debug message.
		 */
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->trap_id);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id &&
		    rxl->mirror_reason == rx_info->mirror_reason) {
			if (rxl_item->enabled)
				found = true;
			break;
		}
	}
	if (!found) {
		rcu_read_unlock();
		goto drop;
	}

	rxl->func(skb, local_port, rxl_item->priv);
	rcu_read_unlock();
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

/* Flatten (lag_id, port_index) into an index in the lag.mapping array. */
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

/* Record the local port behind a LAG member slot. */
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u16 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

/* Look up the local port behind a LAG member slot. */
u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			       u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

/* Clear every member slot of a LAG that maps to the given local port. */
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u16 local_port) 2934 { 2935 int i; 2936 2937 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) { 2938 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 2939 lag_id, i); 2940 2941 if (mlxsw_core->lag.mapping[index] == local_port) 2942 mlxsw_core->lag.mapping[index] = 0; 2943 } 2944 } 2945 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); 2946 2947 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, 2948 enum mlxsw_res_id res_id) 2949 { 2950 return mlxsw_res_valid(&mlxsw_core->res, res_id); 2951 } 2952 EXPORT_SYMBOL(mlxsw_core_res_valid); 2953 2954 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, 2955 enum mlxsw_res_id res_id) 2956 { 2957 return mlxsw_res_get(&mlxsw_core->res, res_id); 2958 } 2959 EXPORT_SYMBOL(mlxsw_core_res_get); 2960 2961 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, 2962 enum devlink_port_flavour flavour, 2963 u32 port_number, bool split, 2964 u32 split_port_subnumber, 2965 bool splittable, u32 lanes, 2966 const unsigned char *switch_id, 2967 unsigned char switch_id_len) 2968 { 2969 struct devlink *devlink = priv_to_devlink(mlxsw_core); 2970 struct mlxsw_core_port *mlxsw_core_port = 2971 &mlxsw_core->ports[local_port]; 2972 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 2973 struct devlink_port_attrs attrs = {}; 2974 int err; 2975 2976 attrs.split = split; 2977 attrs.lanes = lanes; 2978 attrs.splittable = splittable; 2979 attrs.flavour = flavour; 2980 attrs.phys.port_number = port_number; 2981 attrs.phys.split_subport_number = split_port_subnumber; 2982 memcpy(attrs.switch_id.id, switch_id, switch_id_len); 2983 attrs.switch_id.id_len = switch_id_len; 2984 mlxsw_core_port->local_port = local_port; 2985 devlink_port_attrs_set(devlink_port, &attrs); 2986 err = devlink_port_register(devlink, devlink_port, local_port); 2987 if (err) 2988 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); 2989 return err; 2990 } 2991 2992 static void 
__mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) 2993 { 2994 struct mlxsw_core_port *mlxsw_core_port = 2995 &mlxsw_core->ports[local_port]; 2996 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 2997 2998 devlink_port_unregister(devlink_port); 2999 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); 3000 } 3001 3002 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, 3003 u32 port_number, bool split, 3004 u32 split_port_subnumber, 3005 bool splittable, u32 lanes, 3006 const unsigned char *switch_id, 3007 unsigned char switch_id_len) 3008 { 3009 int err; 3010 3011 err = __mlxsw_core_port_init(mlxsw_core, local_port, 3012 DEVLINK_PORT_FLAVOUR_PHYSICAL, 3013 port_number, split, split_port_subnumber, 3014 splittable, lanes, 3015 switch_id, switch_id_len); 3016 if (err) 3017 return err; 3018 3019 atomic_inc(&mlxsw_core->active_ports_count); 3020 return 0; 3021 } 3022 EXPORT_SYMBOL(mlxsw_core_port_init); 3023 3024 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) 3025 { 3026 atomic_dec(&mlxsw_core->active_ports_count); 3027 3028 __mlxsw_core_port_fini(mlxsw_core, local_port); 3029 } 3030 EXPORT_SYMBOL(mlxsw_core_port_fini); 3031 3032 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, 3033 void *port_driver_priv, 3034 const unsigned char *switch_id, 3035 unsigned char switch_id_len) 3036 { 3037 struct mlxsw_core_port *mlxsw_core_port = 3038 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT]; 3039 int err; 3040 3041 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, 3042 DEVLINK_PORT_FLAVOUR_CPU, 3043 0, false, 0, false, 0, 3044 switch_id, switch_id_len); 3045 if (err) 3046 return err; 3047 3048 mlxsw_core_port->port_driver_priv = port_driver_priv; 3049 return 0; 3050 } 3051 EXPORT_SYMBOL(mlxsw_core_cpu_port_init); 3052 3053 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core) 3054 { 3055 __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT); 3056 } 3057 
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini); 3058 3059 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port, 3060 void *port_driver_priv, struct net_device *dev) 3061 { 3062 struct mlxsw_core_port *mlxsw_core_port = 3063 &mlxsw_core->ports[local_port]; 3064 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3065 3066 mlxsw_core_port->port_driver_priv = port_driver_priv; 3067 devlink_port_type_eth_set(devlink_port, dev); 3068 } 3069 EXPORT_SYMBOL(mlxsw_core_port_eth_set); 3070 3071 void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port, 3072 void *port_driver_priv) 3073 { 3074 struct mlxsw_core_port *mlxsw_core_port = 3075 &mlxsw_core->ports[local_port]; 3076 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3077 3078 mlxsw_core_port->port_driver_priv = port_driver_priv; 3079 devlink_port_type_ib_set(devlink_port, NULL); 3080 } 3081 EXPORT_SYMBOL(mlxsw_core_port_ib_set); 3082 3083 void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port, 3084 void *port_driver_priv) 3085 { 3086 struct mlxsw_core_port *mlxsw_core_port = 3087 &mlxsw_core->ports[local_port]; 3088 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3089 3090 mlxsw_core_port->port_driver_priv = port_driver_priv; 3091 devlink_port_type_clear(devlink_port); 3092 } 3093 EXPORT_SYMBOL(mlxsw_core_port_clear); 3094 3095 enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, 3096 u16 local_port) 3097 { 3098 struct mlxsw_core_port *mlxsw_core_port = 3099 &mlxsw_core->ports[local_port]; 3100 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3101 3102 return devlink_port->type; 3103 } 3104 EXPORT_SYMBOL(mlxsw_core_port_type_get); 3105 3106 3107 struct devlink_port * 3108 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, 3109 u16 local_port) 3110 { 3111 struct mlxsw_core_port *mlxsw_core_port = 3112 &mlxsw_core->ports[local_port]; 3113 struct devlink_port 
*devlink_port = &mlxsw_core_port->devlink_port; 3114 3115 return devlink_port; 3116 } 3117 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); 3118 3119 bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) 3120 { 3121 const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; 3122 int i; 3123 3124 for (i = 0; i < bus_info->xm_local_ports_count; i++) 3125 if (bus_info->xm_local_ports[i] == local_port) 3126 return true; 3127 return false; 3128 } 3129 EXPORT_SYMBOL(mlxsw_core_port_is_xm); 3130 3131 struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) 3132 { 3133 return mlxsw_core->env; 3134 } 3135 3136 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, 3137 const char *buf, size_t size) 3138 { 3139 __be32 *m = (__be32 *) buf; 3140 int i; 3141 int count = size / sizeof(__be32); 3142 3143 for (i = count - 1; i >= 0; i--) 3144 if (m[i]) 3145 break; 3146 i++; 3147 count = i ? i : 1; 3148 for (i = 0; i < count; i += 4) 3149 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", 3150 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), 3151 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); 3152 } 3153 3154 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, 3155 u32 in_mod, bool out_mbox_direct, bool reset_ok, 3156 char *in_mbox, size_t in_mbox_size, 3157 char *out_mbox, size_t out_mbox_size) 3158 { 3159 u8 status; 3160 int err; 3161 3162 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); 3163 if (!mlxsw_core->bus->cmd_exec) 3164 return -EOPNOTSUPP; 3165 3166 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", 3167 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod); 3168 if (in_mbox) { 3169 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n"); 3170 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size); 3171 } 3172 3173 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode, 3174 opcode_mod, in_mod, 
out_mbox_direct, 3175 in_mbox, in_mbox_size, 3176 out_mbox, out_mbox_size, &status); 3177 3178 if (!err && out_mbox) { 3179 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); 3180 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); 3181 } 3182 3183 if (reset_ok && err == -EIO && 3184 status == MLXSW_CMD_STATUS_RUNNING_RESET) { 3185 err = 0; 3186 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { 3187 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", 3188 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, 3189 in_mod, status, mlxsw_cmd_status_str(status)); 3190 } else if (err == -ETIMEDOUT) { 3191 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", 3192 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, 3193 in_mod); 3194 } 3195 3196 return err; 3197 } 3198 EXPORT_SYMBOL(mlxsw_cmd_exec); 3199 3200 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) 3201 { 3202 return queue_delayed_work(mlxsw_wq, dwork, delay); 3203 } 3204 EXPORT_SYMBOL(mlxsw_core_schedule_dw); 3205 3206 bool mlxsw_core_schedule_work(struct work_struct *work) 3207 { 3208 return queue_work(mlxsw_owq, work); 3209 } 3210 EXPORT_SYMBOL(mlxsw_core_schedule_work); 3211 3212 void mlxsw_core_flush_owq(void) 3213 { 3214 flush_workqueue(mlxsw_owq); 3215 } 3216 EXPORT_SYMBOL(mlxsw_core_flush_owq); 3217 3218 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3219 const struct mlxsw_config_profile *profile, 3220 u64 *p_single_size, u64 *p_double_size, 3221 u64 *p_linear_size) 3222 { 3223 struct mlxsw_driver *driver = mlxsw_core->driver; 3224 3225 if (!driver->kvd_sizes_get) 3226 return -EINVAL; 3227 3228 return driver->kvd_sizes_get(mlxsw_core, profile, 3229 p_single_size, p_double_size, 3230 p_linear_size); 3231 } 3232 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); 3233 3234 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, 3235 struct 
mlxsw_res *res) 3236 { 3237 int index, i; 3238 u64 data; 3239 u16 id; 3240 int err; 3241 3242 mlxsw_cmd_mbox_zero(mbox); 3243 3244 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; 3245 index++) { 3246 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index); 3247 if (err) 3248 return err; 3249 3250 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { 3251 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); 3252 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); 3253 3254 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) 3255 return 0; 3256 3257 mlxsw_res_parse(res, id, data); 3258 } 3259 } 3260 3261 /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get 3262 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. 3263 */ 3264 return -EIO; 3265 } 3266 EXPORT_SYMBOL(mlxsw_core_resources_query); 3267 3268 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core) 3269 { 3270 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv); 3271 } 3272 EXPORT_SYMBOL(mlxsw_core_read_frc_h); 3273 3274 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core) 3275 { 3276 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv); 3277 } 3278 EXPORT_SYMBOL(mlxsw_core_read_frc_l); 3279 3280 void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core) 3281 { 3282 mlxsw_core->emad.enable_string_tlv = true; 3283 } 3284 EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable); 3285 3286 static int __init mlxsw_core_module_init(void) 3287 { 3288 int err; 3289 3290 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); 3291 if (!mlxsw_wq) 3292 return -ENOMEM; 3293 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, 3294 mlxsw_core_driver_name); 3295 if (!mlxsw_owq) { 3296 err = -ENOMEM; 3297 goto err_alloc_ordered_workqueue; 3298 } 3299 return 0; 3300 3301 err_alloc_ordered_workqueue: 3302 destroy_workqueue(mlxsw_wq); 3303 return err; 3304 } 3305 3306 static void __exit mlxsw_core_module_exit(void) 3307 { 3308 destroy_workqueue(mlxsw_owq); 3309 
destroy_workqueue(mlxsw_wq); 3310 } 3311 3312 module_init(mlxsw_core_module_init); 3313 module_exit(mlxsw_core_module_exit); 3314 3315 MODULE_LICENSE("Dual BSD/GPL"); 3316 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 3317 MODULE_DESCRIPTION("Mellanox switch device core driver"); 3318