// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "core_env.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u16 local_port;
	struct mlxsw_linecard *linecard;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct list_head irq_event_handler_list;
	struct mutex irq_event_handler_lock; /* Locks access to handlers list */
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
		bool enable_string_tlv;
		bool enable_latency_tlv;
	} emad;
	struct {
		u16 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_linecards *linecards;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	atomic_t active_ports_count;
	bool fw_flash_in_progress;
	struct {
		struct devlink_health_reporter *fw_fatal;
	} health;
	struct mlxsw_env *env;
	unsigned long driver_priv[];
	/* driver_priv has to be always the last item */
};

struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->linecards;
}

void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
			      struct mlxsw_linecards *linecards)
{
	mlxsw_core->linecards = linecards;
}

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static u64 mlxsw_ports_occ_get(void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	return atomic_read(&mlxsw_core->active_ports_count);
}
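
/* Note: the devlink "ports" resource is sized max_ports - 1 because
 * switch ports are numbered from 1 (see mlxsw_ports_init() below), so
 * slot 0 of the ports array never holds a front-panel port.
 */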
static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params ports_num_params;
	u32 max_ports;

	max_ports = mlxsw_core->max_ports - 1;
	devlink_resource_size_params_init(&ports_num_params, max_ports,
					  max_ports, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink,
				      DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
				      max_ports, MLXSW_CORE_RESOURCE_PORTS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &ports_num_params);
}

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	int err;

	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	if (!reload) {
		err = mlxsw_core_resources_ports_register(mlxsw_core);
		if (err)
			goto err_resources_ports_register;
	}
	atomic_set(&mlxsw_core->active_ports_count, 0);
	devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
				       mlxsw_ports_occ_get, mlxsw_core);

	return 0;

err_resources_ports_register:
	kfree(mlxsw_core->ports);
	return err;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
	if (!reload)
		devl_resources_unregister(priv_to_devlink(mlxsw_core));

	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (driver->profile->used_max_lag) {
		*p_max_lag = driver->profile->max_lag;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
		return -EIO;

	*p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_max_lag);

enum mlxsw_cmd_mbox_config_profile_lag_mode
mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_lag_mode);

enum mlxsw_cmd_mbox_config_profile_flood_mode
mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->flood_mode(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_flood_mode);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
					  const struct mlxsw_fw_rev *req_rev)
{
	return rev->minor > req_rev->minor ||
	       (rev->minor == req_rev->minor &&
		rev->subminor >= req_rev->subminor);
}
EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
	bool enabled;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_event_listener el;
	void *priv;
};

static const u8 mlxsw_core_trap_groups[] = {
	MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
	MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
};

static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int err;
	int i;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
		mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
				    MLXSW_REG_HTGT_INVALID_POLICER,
				    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
				    MLXSW_REG_HTGT_DEFAULT_TC);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}
	return 0;
}

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_string_tlv_type
 * Type of the TLV.
 * Must be set to 0x2 (string TLV).
 */
MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);

/* emad_string_tlv_len
 * Length of the string TLV in u32.
 */
MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);

#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128

/* emad_string_tlv_string
 * String provided by the device's firmware in case of erroneous register access
 */
MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
	       MLXSW_EMAD_STRING_TLV_STRING_LEN);

/* emad_latency_tlv_type
 * Type of the TLV.
 * Must be set to 0x4 (latency TLV).
 */
MLXSW_ITEM32(emad, latency_tlv, type, 0x00, 27, 5);

/* emad_latency_tlv_len
 * Length of the latency TLV in u32.
 */
MLXSW_ITEM32(emad, latency_tlv, len, 0x00, 16, 11);

/* emad_latency_tlv_latency_time
 * EMAD latency time in units of uSec.
 */
MLXSW_ITEM32(emad, latency_tlv, latency_time, 0x04, 0, 32);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}
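
/* Every EMAD TLV starts with a common header word: bits 31:27 hold the
 * TLV type and bits 26:16 the TLV length in 32-bit words, as encoded by
 * the MLXSW_ITEM32() definitions above. The helpers below pack the
 * individual TLVs of an EMAD request.
 */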
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_string_tlv(char *string_tlv)
{
	mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
	mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static void mlxsw_emad_pack_latency_tlv(char *latency_tlv)
{
	mlxsw_emad_latency_tlv_type_set(latency_tlv, MLXSW_EMAD_TLV_TYPE_LATENCY);
	mlxsw_emad_latency_tlv_len_set(latency_tlv, MLXSW_EMAD_LATENCY_TLV_LEN);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(const struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type, u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	if (mlxsw_core->emad.enable_latency_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_latency_tlv(buf);
	}

	if (mlxsw_core->emad.enable_string_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_string_tlv(buf);
	}

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
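
/* mlxsw_emad_construct() above pushes the TLVs in reverse order, so the
 * frame that goes out on the wire is laid out as:
 *
 *   Ethernet header | OP TLV | [STRING TLV] | [LATENCY TLV] | REG TLV | END TLV
 *
 * The optional TLVs are only added when the firmware advertised support
 * for them (see mlxsw_emad_tlv_enable() below).
 */
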
struct mlxsw_emad_tlv_offsets {
	u16 op_tlv;
	u16 string_tlv;
	u16 latency_tlv;
	u16 reg_tlv;
};

static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
{
	u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);

	return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
}

static bool mlxsw_emad_tlv_is_latency_tlv(const char *tlv)
{
	u8 tlv_type = mlxsw_emad_latency_tlv_type_get(tlv);

	return tlv_type == MLXSW_EMAD_TLV_TYPE_LATENCY;
}

static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
	offsets->string_tlv = 0;
	offsets->latency_tlv = 0;

	offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
			   MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);

	/* If string TLV is present, it must come after the operation TLV. */
	if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
		offsets->string_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	}

	if (mlxsw_emad_tlv_is_latency_tlv(skb->data + offsets->reg_tlv)) {
		offsets->latency_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
	}
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->op_tlv));
}

static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	if (!offsets->string_tlv)
		return NULL;

	return ((char *) (skb->data + offsets->string_tlv));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->reg_tlv));
}

static char *mlxsw_emad_reg_payload(const char *reg_tlv)
{
	return ((char *) (reg_tlv + sizeof(u32)));
}

static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
{
	return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	char *emad_err_string;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
					  struct mlxsw_reg_trans *trans)
{
	char *string_tlv;
	char *string;

	string_tlv = mlxsw_emad_string_tlv(skb);
	if (!string_tlv)
		return;

	trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
					 GFP_ATOMIC);
	if (!trans->emad_err_string)
		return;

	string = mlxsw_emad_string_tlv_string_data(string_tlv);
	strscpy(trans->emad_err_string, string,
		MLXSW_EMAD_STRING_TLV_STRING_LEN);
}

#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
#define MLXSW_EMAD_TIMEOUT_MS			200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	if (trans->core->fw_flash_in_progress)
		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
			   timeout << trans->retries);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_clone(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *reg_tlv = mlxsw_emad_reg_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(reg_tlv),
					  trans->reg->len, trans->cb_priv);
		} else {
			mlxsw_emad_process_string_tlv(skb, trans);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
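
/* trans->active arbitrates between the response path and the timeout
 * worker: both atomic_dec_and_test() it, and only the caller that
 * brings it to zero may retry or finish the transaction, so a late
 * response cannot race with a timeout-triggered retransmission.
 */
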
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, FORWARD);
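
/* Whether the optional string and latency TLVs may be used is learned
 * from the firmware itself: mlxsw_emad_tlv_enable() caches the MGIR
 * capability bits, and the flags then gate both frame construction and
 * the allocation size in mlxsw_emad_alloc().
 */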
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
	char mgir_pl[MLXSW_REG_MGIR_LEN];
	bool string_tlv, latency_tlv;
	int err;

	mlxsw_reg_mgir_pack(mgir_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
	if (err)
		return err;

	string_tlv = mlxsw_reg_mgir_fw_info_string_tlv_get(mgir_pl);
	mlxsw_core->emad.enable_string_tlv = string_tlv;

	latency_tlv = mlxsw_reg_mgir_fw_info_latency_tlv_get(mgir_pl);
	mlxsw_core->emad.enable_latency_tlv = latency_tlv;

	return 0;
}

static void mlxsw_emad_tlv_disable(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->emad.enable_latency_tlv = false;
	mlxsw_core->emad.enable_string_tlv = false;
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_emad_tlv_enable(mlxsw_core);
	if (err)
		goto err_emad_tlv_enable;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_tlv_enable:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_emad_tlv_disable(mlxsw_core);
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (mlxsw_core->emad.enable_string_tlv)
		emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	if (mlxsw_core->emad.enable_latency_tlv)
		emad_len += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
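
/* The entire frame length is reserved as headroom above, which lets
 * mlxsw_emad_construct() build the EMAD back-to-front with skb_push()
 * and leaves room for the bus-specific TX header to be pushed last.
 */
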
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
			struct mlxfw_dev *mlxfw_dev,
			const struct firmware *firmware,
			struct netlink_ext_ack *extack)
{
	int err;

	mlxsw_core->fw_flash_in_progress = true;
	err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
	mlxsw_core->fw_flash_in_progress = false;

	return err;
}
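
/* fw_flash_in_progress widens the per-try EMAD timeout from
 * MLXSW_EMAD_TIMEOUT_MS (200 ms) to
 * MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS (3 s), as register accesses can
 * take much longer while the firmware is busy flashing.
 */
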
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};

static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index, u32 component_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					    u8 *data, u16 size, u32 offset)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					 enum mlxfw_fsm_state *fsm_state,
					 enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
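
/* These callbacks are driven by the shared mlxfw code (see
 * ../mlxfw/mlxfw.h); the usual flow is fsm_lock, then per component
 * fsm_component_update -> fsm_block_download -> fsm_component_verify,
 * followed by fsm_activate and fsm_release, with fsm_cancel used to
 * abort on failure. Each step maps to one MCC/MCDA register access.
 */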
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};

static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_core_fw_info mlxsw_core_fw_info = {
		.mlxfw_dev = {
			.ops = &mlxsw_core_fw_mlxsw_dev_ops,
			.psid = mlxsw_core->bus_info->psid,
			.psid_size = strlen(mlxsw_core->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_core),
		},
		.mlxsw_core = mlxsw_core
	};

	return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
				   firmware, extack);
}

static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_bus_info *mlxsw_bus_info,
				      const struct mlxsw_fw_rev *req_rev,
				      const char *filename)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devl_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
					      DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					      &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);

	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
	if (err) {
		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
		return err;
	}

	err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
}

static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
							    union devlink_param_value val,
							    struct netlink_ext_ack *extack)
{
	if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
	    val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};

static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devl_params_register(devlink, mlxsw_core_fw_devlink_params,
				   ARRAY_SIZE(mlxsw_core_fw_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					value);
	return 0;
}

static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devl_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
			       ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
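
/* The devlink ops below share one pattern: look up the corresponding
 * driver callback, return -EOPNOTSUPP when the driver (or, for port
 * ops, the port) does not implement it, and otherwise forward the call
 * unchanged.
 */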
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    struct devlink_port *port,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core,
					      mlxsw_core_port->local_port,
					      count, extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      struct devlink_port *port,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core,
						mlxsw_core_port->local_port,
						extack);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type,
					 extack);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold, extack);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold, extack);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static int
mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
	u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
	char mgir_pl[MLXSW_REG_MGIR_LEN];
	char buf[32];
	int err;

	mlxsw_reg_mgir_pack(mgir_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
	if (err)
		return err;
	mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
			      &fw_minor, &fw_sub_minor);

	sprintf(buf, "%X", hw_rev);
	err = devlink_info_version_fixed_put(req, "hw.revision", buf);
	if (err)
		return err;

	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
					     fw_info_psid);
	if (err)
		return err;

	sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
	err = devlink_info_version_running_put(req, "fw.version", buf);
	if (err)
		return err;

	return devlink_info_version_running_put(req,
						DEVLINK_INFO_VERSION_GENERIC_FW,
						buf);
}
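
/* devlink reload tears the device down with reload=true, so devlink
 * resources registered at probe time survive, and then re-registers it
 * on the same bus. Both driver re-init and firmware activation are
 * reported as performed actions.
 */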
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
					  bool netns_change, enum devlink_reload_action action,
					  enum devlink_reload_limit limit,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	return 0;
}

static int
mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
					enum devlink_reload_limit limit, u32 *actions_performed,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			     BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink, extack);
	return err;
}

static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
}

static int mlxsw_devlink_trap_init(struct devlink *devlink,
				   const struct devlink_trap *trap,
				   void *trap_ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_init)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
}

static void mlxsw_devlink_trap_fini(struct devlink *devlink,
				    const struct devlink_trap *trap,
				    void *trap_ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_fini)
		return;
	mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
}

static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
					 const struct devlink_trap *trap,
					 enum devlink_trap_action action,
					 struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_action_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
}

static int
mlxsw_devlink_trap_group_init(struct devlink *devlink,
			      const struct devlink_trap_group *group)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_group_init)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_group_init(mlxsw_core, group);
}

static int
mlxsw_devlink_trap_group_set(struct devlink *devlink,
			     const struct devlink_trap_group *group,
			     const struct devlink_trap_policer *policer,
			     struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_group_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
}

static int
mlxsw_devlink_trap_policer_init(struct devlink *devlink,
				const struct devlink_trap_policer *policer)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_init)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
}

static void
mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
				const struct devlink_trap_policer *policer)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_fini)
		return;
	mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
}

static int
mlxsw_devlink_trap_policer_set(struct devlink *devlink,
			       const struct devlink_trap_policer *policer,
			       u64 rate, u64 burst,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
					      extack);
}

static int
mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
				       const struct devlink_trap_policer *policer,
				       u64 *p_drops)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->trap_policer_counter_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
						      p_drops);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down = mlxsw_devlink_core_bus_device_reload_down,
	.reload_up = mlxsw_devlink_core_bus_device_reload_up,
	.sb_pool_get = mlxsw_devlink_sb_pool_get,
	.sb_pool_set = mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get = mlxsw_devlink_info_get,
	.flash_update = mlxsw_devlink_flash_update,
	.trap_init = mlxsw_devlink_trap_init,
	.trap_fini = mlxsw_devlink_trap_fini,
	.trap_action_set = mlxsw_devlink_trap_action_set,
	.trap_group_init = mlxsw_devlink_trap_group_init,
	.trap_group_set = mlxsw_devlink_trap_group_set,
	.trap_policer_init = mlxsw_devlink_trap_policer_init,
	.trap_policer_fini = mlxsw_devlink_trap_policer_fini,
	.trap_policer_set = mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};

static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core_fw_params_register(mlxsw_core);
}

static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core_fw_params_unregister(mlxsw_core);
}

struct mlxsw_core_health_event {
	struct mlxsw_core *mlxsw_core;
	char mfde_pl[MLXSW_REG_MFDE_LEN];
	struct work_struct work;
};

static void mlxsw_core_health_event_work(struct work_struct *work)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core;

	event = container_of(work, struct mlxsw_core_health_event, work);
	mlxsw_core = event->mlxsw_core;
	devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
			      event->mfde_pl);
	kfree(event);
}

static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
					    char *mfde_pl, void *priv)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core = priv;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->mlxsw_core = mlxsw_core;
	memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
	INIT_WORK(&event->work, mlxsw_core_health_event_work);
	mlxsw_core_schedule_work(&event->work);
}

static const struct mlxsw_listener mlxsw_core_health_listener =
	MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
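
/* MFDE events are delivered in atomic context, so the listener above
 * only copies the register payload (allocated with GFP_ATOMIC) and
 * defers the devlink_health_report() call to process context via the
 * work item.
 */
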
mlxsw_core_health_event_work); 1803 mlxsw_core_schedule_work(&event->work); 1804 } 1805 1806 static const struct mlxsw_listener mlxsw_core_health_listener = 1807 MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE); 1808 1809 static void 1810 mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl, 1811 struct devlink_fmsg *fmsg) 1812 { 1813 u32 val, tile_v; 1814 1815 val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl); 1816 devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); 1817 tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl); 1818 if (tile_v) { 1819 val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl); 1820 devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); 1821 } 1822 } 1823 1824 static void 1825 mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl, 1826 struct devlink_fmsg *fmsg) 1827 { 1828 u32 val, tile_v; 1829 1830 val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl); 1831 devlink_fmsg_u32_pair_put(fmsg, "var0", val); 1832 val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl); 1833 devlink_fmsg_u32_pair_put(fmsg, "var1", val); 1834 val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl); 1835 devlink_fmsg_u32_pair_put(fmsg, "var2", val); 1836 val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl); 1837 devlink_fmsg_u32_pair_put(fmsg, "var3", val); 1838 val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl); 1839 devlink_fmsg_u32_pair_put(fmsg, "var4", val); 1840 val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl); 1841 devlink_fmsg_u32_pair_put(fmsg, "existptr", val); 1842 val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl); 1843 devlink_fmsg_u32_pair_put(fmsg, "callra", val); 1844 val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl); 1845 devlink_fmsg_bool_pair_put(fmsg, "old_event", val); 1846 tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl); 1847 if (tile_v) { 1848 val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl); 1849 devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); 1850 } 1851 val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl); 1852 devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val); 1853 } 1854 1855 static void 1856 mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl, 1857 struct devlink_fmsg *fmsg) 1858 { 1859 u32 val; 1860 1861 val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl); 1862 devlink_fmsg_bool_pair_put(fmsg, "old_event", val); 1863 val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl); 1864 devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); 1865 } 1866 1867 static void 1868 mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl, 1869 struct devlink_fmsg *fmsg) 1870 { 1871 u32 val; 1872 1873 val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl); 1874 devlink_fmsg_u32_pair_put(fmsg, "log_address", val); 1875 val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl); 1876 devlink_fmsg_bool_pair_put(fmsg, "old_event", val); 1877 val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl); 1878 devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); 1879 val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl); 1880 devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); 1881 } 1882 1883 static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter, 1884 struct devlink_fmsg *fmsg, void *priv_ctx, 1885 struct netlink_ext_ack *extack) 1886 { 1887 char *mfde_pl = priv_ctx; 1888 char *val_str; 1889 u8 event_id; 1890 u32 val; 1891 1892 if (!priv_ctx) 1893 /* User-triggered dumps are not possible */ 1894 return -EOPNOTSUPP; 1895 1896 val = mlxsw_reg_mfde_irisc_id_get(mfde_pl); 1897 devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val); 1898 1899 
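/* The remainder of this dump emits flat key/value pairs plus nested "event"
 * and "severity" sections through devlink's fmsg API. Roughly what userspace
 * would see for `devlink health dump show DEV reporter fw_fatal` (values are
 * hypothetical, for illustration only):
 *
 *   irisc_id: 0
 *   event: { id: 7, desc: "FW assert" }
 *   severity: { id: 1, desc: "Fatal" }
 *   method: "write", long_process: false, command_type: "emad"
 *   reg_attr_id: 0x9000
 */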
devlink_fmsg_arr_pair_nest_start(fmsg, "event"); 1900 event_id = mlxsw_reg_mfde_event_id_get(mfde_pl); 1901 devlink_fmsg_u32_pair_put(fmsg, "id", event_id); 1902 switch (event_id) { 1903 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: 1904 val_str = "CR space timeout"; 1905 break; 1906 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: 1907 val_str = "KVD insertion machine stopped"; 1908 break; 1909 case MLXSW_REG_MFDE_EVENT_ID_TEST: 1910 val_str = "Test"; 1911 break; 1912 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: 1913 val_str = "FW assert"; 1914 break; 1915 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: 1916 val_str = "Fatal cause"; 1917 break; 1918 default: 1919 val_str = NULL; 1920 } 1921 if (val_str) 1922 devlink_fmsg_string_pair_put(fmsg, "desc", val_str); 1923 devlink_fmsg_arr_pair_nest_end(fmsg); 1924 1925 devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); 1926 val = mlxsw_reg_mfde_severity_get(mfde_pl); 1927 devlink_fmsg_u8_pair_put(fmsg, "id", val); 1928 switch (val) { 1929 case MLXSW_REG_MFDE_SEVERITY_FATL: 1930 val_str = "Fatal"; 1931 break; 1932 case MLXSW_REG_MFDE_SEVERITY_NRML: 1933 val_str = "Normal"; 1934 break; 1935 case MLXSW_REG_MFDE_SEVERITY_INTR: 1936 val_str = "Debug"; 1937 break; 1938 default: 1939 val_str = NULL; 1940 } 1941 if (val_str) 1942 devlink_fmsg_string_pair_put(fmsg, "desc", val_str); 1943 devlink_fmsg_arr_pair_nest_end(fmsg); 1944 1945 val = mlxsw_reg_mfde_method_get(mfde_pl); 1946 switch (val) { 1947 case MLXSW_REG_MFDE_METHOD_QUERY: 1948 val_str = "query"; 1949 break; 1950 case MLXSW_REG_MFDE_METHOD_WRITE: 1951 val_str = "write"; 1952 break; 1953 default: 1954 val_str = NULL; 1955 } 1956 if (val_str) 1957 devlink_fmsg_string_pair_put(fmsg, "method", val_str); 1958 1959 val = mlxsw_reg_mfde_long_process_get(mfde_pl); 1960 devlink_fmsg_bool_pair_put(fmsg, "long_process", val); 1961 1962 val = mlxsw_reg_mfde_command_type_get(mfde_pl); 1963 switch (val) { 1964 case MLXSW_REG_MFDE_COMMAND_TYPE_MAD: 1965 val_str = "mad"; 1966 break; 1967 case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD: 1968 val_str = "emad"; 1969 break; 1970 case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF: 1971 val_str = "cmdif"; 1972 break; 1973 default: 1974 val_str = NULL; 1975 } 1976 if (val_str) 1977 devlink_fmsg_string_pair_put(fmsg, "command_type", val_str); 1978 1979 val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl); 1980 devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val); 1981 1982 switch (event_id) { 1983 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: 1984 mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, fmsg); 1985 break; 1986 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: 1987 mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, fmsg); 1988 break; 1989 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: 1990 mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); 1991 break; 1992 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: 1993 mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, fmsg); 1994 break; 1995 } 1996 1997 return 0; 1998 } 1999 2000 static int 2001 mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter, 2002 struct netlink_ext_ack *extack) 2003 { 2004 struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter); 2005 char mfgd_pl[MLXSW_REG_MFGD_LEN]; 2006 int err; 2007 2008 /* Read the register first to make sure no other bits are changed. 
*/ 2009 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2010 if (err) 2011 return err; 2012 mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true); 2013 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2014 } 2015 2016 static const struct devlink_health_reporter_ops 2017 mlxsw_core_health_fw_fatal_ops = { 2018 .name = "fw_fatal", 2019 .dump = mlxsw_core_health_fw_fatal_dump, 2020 .test = mlxsw_core_health_fw_fatal_test, 2021 }; 2022 2023 static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core, 2024 bool enable) 2025 { 2026 char mfgd_pl[MLXSW_REG_MFGD_LEN]; 2027 int err; 2028 2029 /* Read the register first to make sure no other bits are changed. */ 2030 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2031 if (err) 2032 return err; 2033 mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable); 2034 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2035 } 2036 2037 static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core) 2038 { 2039 struct devlink *devlink = priv_to_devlink(mlxsw_core); 2040 struct devlink_health_reporter *fw_fatal; 2041 int err; 2042 2043 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 2044 return 0; 2045 2046 fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops, 2047 0, mlxsw_core); 2048 if (IS_ERR(fw_fatal)) { 2049 dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter"); 2050 return PTR_ERR(fw_fatal); 2051 } 2052 mlxsw_core->health.fw_fatal = fw_fatal; 2053 2054 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); 2055 if (err) 2056 goto err_trap_register; 2057 2058 err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true); 2059 if (err) 2060 goto err_fw_fatal_config; 2061 2062 return 0; 2063 2064 err_fw_fatal_config: 2065 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); 2066 err_trap_register: 2067 devl_health_reporter_destroy(mlxsw_core->health.fw_fatal); 2068 return err; 2069 } 2070 2071 static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core) 2072 { 2073 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 2074 return; 2075 2076 mlxsw_core_health_fw_fatal_config(mlxsw_core, false); 2077 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); 2078 /* Make sure there is no more event work scheduled */ 2079 mlxsw_core_flush_owq(); 2080 devl_health_reporter_destroy(mlxsw_core->health.fw_fatal); 2081 } 2082 2083 static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core) 2084 { 2085 INIT_LIST_HEAD(&mlxsw_core->irq_event_handler_list); 2086 mutex_init(&mlxsw_core->irq_event_handler_lock); 2087 } 2088 2089 static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core) 2090 { 2091 mutex_destroy(&mlxsw_core->irq_event_handler_lock); 2092 WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list)); 2093 } 2094 2095 static int 2096 __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, 2097 const struct mlxsw_bus *mlxsw_bus, 2098 void *bus_priv, bool reload, 2099 struct devlink *devlink, 2100 struct netlink_ext_ack *extack) 2101 { 2102 const char *device_kind = mlxsw_bus_info->device_kind; 2103 struct mlxsw_core *mlxsw_core; 2104 struct mlxsw_driver *mlxsw_driver; 2105 size_t alloc_size; 2106 u16 max_lag; 2107 int err; 2108 2109 mlxsw_driver = mlxsw_core_driver_get(device_kind); 2110 if (!mlxsw_driver) 2111 return -EINVAL; 2112 2113 if (!reload) { 2114 alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; 2115 
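/* A single allocation backs both objects here: devlink_alloc() reserves
 * alloc_size bytes of driver-private area, so struct mlxsw_core and, directly
 * behind it, the driver's own private data of priv_size bytes live inside the
 * devlink object and share its lifetime.
 */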
devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size, 2116 mlxsw_bus_info->dev); 2117 if (!devlink) { 2118 err = -ENOMEM; 2119 goto err_devlink_alloc; 2120 } 2121 devl_lock(devlink); 2122 devl_register(devlink); 2123 } 2124 2125 mlxsw_core = devlink_priv(devlink); 2126 INIT_LIST_HEAD(&mlxsw_core->rx_listener_list); 2127 INIT_LIST_HEAD(&mlxsw_core->event_listener_list); 2128 mlxsw_core->driver = mlxsw_driver; 2129 mlxsw_core->bus = mlxsw_bus; 2130 mlxsw_core->bus_priv = bus_priv; 2131 mlxsw_core->bus_info = mlxsw_bus_info; 2132 mlxsw_core_irq_event_handler_init(mlxsw_core); 2133 2134 err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, 2135 &mlxsw_core->res); 2136 if (err) 2137 goto err_bus_init; 2138 2139 if (mlxsw_driver->resources_register && !reload) { 2140 err = mlxsw_driver->resources_register(mlxsw_core); 2141 if (err) 2142 goto err_register_resources; 2143 } 2144 2145 err = mlxsw_ports_init(mlxsw_core, reload); 2146 if (err) 2147 goto err_ports_init; 2148 2149 err = mlxsw_core_max_lag(mlxsw_core, &max_lag); 2150 if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { 2151 alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag * 2152 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); 2153 mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); 2154 if (!mlxsw_core->lag.mapping) { 2155 err = -ENOMEM; 2156 goto err_alloc_lag_mapping; 2157 } 2158 } 2159 2160 err = mlxsw_core_trap_groups_set(mlxsw_core); 2161 if (err) 2162 goto err_trap_groups_set; 2163 2164 err = mlxsw_emad_init(mlxsw_core); 2165 if (err) 2166 goto err_emad_init; 2167 2168 if (!reload) { 2169 err = mlxsw_core_params_register(mlxsw_core); 2170 if (err) 2171 goto err_register_params; 2172 } 2173 2174 err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev, 2175 mlxsw_driver->fw_filename); 2176 if (err) 2177 goto err_fw_rev_validate; 2178 2179 err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info); 2180 if (err) 2181 goto err_linecards_init; 2182 2183 err = mlxsw_core_health_init(mlxsw_core); 2184 if (err) 2185 goto err_health_init; 2186 2187 err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); 2188 if (err) 2189 goto err_hwmon_init; 2190 2191 err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info, 2192 &mlxsw_core->thermal); 2193 if (err) 2194 goto err_thermal_init; 2195 2196 err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env); 2197 if (err) 2198 goto err_env_init; 2199 2200 if (mlxsw_driver->init) { 2201 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); 2202 if (err) 2203 goto err_driver_init; 2204 } 2205 2206 if (!reload) 2207 devl_unlock(devlink); 2208 return 0; 2209 2210 err_driver_init: 2211 mlxsw_env_fini(mlxsw_core->env); 2212 err_env_init: 2213 mlxsw_thermal_fini(mlxsw_core->thermal); 2214 err_thermal_init: 2215 mlxsw_hwmon_fini(mlxsw_core->hwmon); 2216 err_hwmon_init: 2217 mlxsw_core_health_fini(mlxsw_core); 2218 err_health_init: 2219 mlxsw_linecards_fini(mlxsw_core); 2220 err_linecards_init: 2221 err_fw_rev_validate: 2222 if (!reload) 2223 mlxsw_core_params_unregister(mlxsw_core); 2224 err_register_params: 2225 mlxsw_emad_fini(mlxsw_core); 2226 err_emad_init: 2227 err_trap_groups_set: 2228 kfree(mlxsw_core->lag.mapping); 2229 err_alloc_lag_mapping: 2230 mlxsw_ports_fini(mlxsw_core, reload); 2231 err_ports_init: 2232 if (!reload) 2233 devl_resources_unregister(devlink); 2234 err_register_resources: 2235 mlxsw_bus->fini(bus_priv); 2236 err_bus_init: 2237 mlxsw_core_irq_event_handler_fini(mlxsw_core); 2238 if 
(!reload) { 2239 devl_unregister(devlink); 2240 devl_unlock(devlink); 2241 devlink_free(devlink); 2242 } 2243 err_devlink_alloc: 2244 return err; 2245 } 2246 2247 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, 2248 const struct mlxsw_bus *mlxsw_bus, 2249 void *bus_priv, bool reload, 2250 struct devlink *devlink, 2251 struct netlink_ext_ack *extack) 2252 { 2253 bool called_again = false; 2254 int err; 2255 2256 again: 2257 err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus, 2258 bus_priv, reload, 2259 devlink, extack); 2260 /* -EAGAIN is returned in case the FW was updated. FW needs 2261 * a reset, so lets try to call __mlxsw_core_bus_device_register() 2262 * again. 2263 */ 2264 if (err == -EAGAIN && !called_again) { 2265 called_again = true; 2266 goto again; 2267 } 2268 2269 return err; 2270 } 2271 EXPORT_SYMBOL(mlxsw_core_bus_device_register); 2272 2273 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, 2274 bool reload) 2275 { 2276 struct devlink *devlink = priv_to_devlink(mlxsw_core); 2277 2278 if (!reload) 2279 devl_lock(devlink); 2280 2281 if (devlink_is_reload_failed(devlink)) { 2282 if (!reload) 2283 /* Only the parts that were not de-initialized in the 2284 * failed reload attempt need to be de-initialized. 2285 */ 2286 goto reload_fail_deinit; 2287 else 2288 return; 2289 } 2290 2291 if (mlxsw_core->driver->fini) 2292 mlxsw_core->driver->fini(mlxsw_core); 2293 mlxsw_env_fini(mlxsw_core->env); 2294 mlxsw_thermal_fini(mlxsw_core->thermal); 2295 mlxsw_hwmon_fini(mlxsw_core->hwmon); 2296 mlxsw_core_health_fini(mlxsw_core); 2297 mlxsw_linecards_fini(mlxsw_core); 2298 if (!reload) 2299 mlxsw_core_params_unregister(mlxsw_core); 2300 mlxsw_emad_fini(mlxsw_core); 2301 kfree(mlxsw_core->lag.mapping); 2302 mlxsw_ports_fini(mlxsw_core, reload); 2303 if (!reload) 2304 devl_resources_unregister(devlink); 2305 mlxsw_core->bus->fini(mlxsw_core->bus_priv); 2306 mlxsw_core_irq_event_handler_fini(mlxsw_core); 2307 if (!reload) { 2308 devl_unregister(devlink); 2309 devl_unlock(devlink); 2310 devlink_free(devlink); 2311 } 2312 2313 return; 2314 2315 reload_fail_deinit: 2316 mlxsw_core_params_unregister(mlxsw_core); 2317 devl_resources_unregister(devlink); 2318 devl_unregister(devlink); 2319 devl_unlock(devlink); 2320 devlink_free(devlink); 2321 } 2322 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); 2323 2324 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, 2325 const struct mlxsw_tx_info *tx_info) 2326 { 2327 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, 2328 tx_info); 2329 } 2330 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); 2331 2332 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, 2333 const struct mlxsw_tx_info *tx_info) 2334 { 2335 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, 2336 tx_info); 2337 } 2338 EXPORT_SYMBOL(mlxsw_core_skb_transmit); 2339 2340 void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, 2341 struct sk_buff *skb, u16 local_port) 2342 { 2343 if (mlxsw_core->driver->ptp_transmitted) 2344 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb, 2345 local_port); 2346 } 2347 EXPORT_SYMBOL(mlxsw_core_ptp_transmitted); 2348 2349 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, 2350 const struct mlxsw_rx_listener *rxl_b) 2351 { 2352 return (rxl_a->func == rxl_b->func && 2353 rxl_a->local_port == rxl_b->local_port && 2354 rxl_a->trap_id == rxl_b->trap_id && 2355 rxl_a->mirror_reason == rxl_b->mirror_reason); 2356 
} 2357 2358 static struct mlxsw_rx_listener_item * 2359 __find_rx_listener_item(struct mlxsw_core *mlxsw_core, 2360 const struct mlxsw_rx_listener *rxl) 2361 { 2362 struct mlxsw_rx_listener_item *rxl_item; 2363 2364 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) { 2365 if (__is_rx_listener_equal(&rxl_item->rxl, rxl)) 2366 return rxl_item; 2367 } 2368 return NULL; 2369 } 2370 2371 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core, 2372 const struct mlxsw_rx_listener *rxl, 2373 void *priv, bool enabled) 2374 { 2375 struct mlxsw_rx_listener_item *rxl_item; 2376 2377 rxl_item = __find_rx_listener_item(mlxsw_core, rxl); 2378 if (rxl_item) 2379 return -EEXIST; 2380 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL); 2381 if (!rxl_item) 2382 return -ENOMEM; 2383 rxl_item->rxl = *rxl; 2384 rxl_item->priv = priv; 2385 rxl_item->enabled = enabled; 2386 2387 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list); 2388 return 0; 2389 } 2390 EXPORT_SYMBOL(mlxsw_core_rx_listener_register); 2391 2392 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core, 2393 const struct mlxsw_rx_listener *rxl) 2394 { 2395 struct mlxsw_rx_listener_item *rxl_item; 2396 2397 rxl_item = __find_rx_listener_item(mlxsw_core, rxl); 2398 if (!rxl_item) 2399 return; 2400 list_del_rcu(&rxl_item->list); 2401 synchronize_rcu(); 2402 kfree(rxl_item); 2403 } 2404 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister); 2405 2406 static void 2407 mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core, 2408 const struct mlxsw_rx_listener *rxl, 2409 bool enabled) 2410 { 2411 struct mlxsw_rx_listener_item *rxl_item; 2412 2413 rxl_item = __find_rx_listener_item(mlxsw_core, rxl); 2414 if (WARN_ON(!rxl_item)) 2415 return; 2416 rxl_item->enabled = enabled; 2417 } 2418 2419 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port, 2420 void *priv) 2421 { 2422 struct mlxsw_event_listener_item *event_listener_item = priv; 2423 struct mlxsw_core *mlxsw_core; 2424 struct mlxsw_reg_info reg; 2425 char *payload; 2426 char *reg_tlv; 2427 char *op_tlv; 2428 2429 mlxsw_core = event_listener_item->mlxsw_core; 2430 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0, 2431 skb->data, skb->len); 2432 2433 mlxsw_emad_tlv_parse(skb); 2434 op_tlv = mlxsw_emad_op_tlv(skb); 2435 reg_tlv = mlxsw_emad_reg_tlv(skb); 2436 2437 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv); 2438 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32); 2439 payload = mlxsw_emad_reg_payload(reg_tlv); 2440 event_listener_item->el.func(&reg, payload, event_listener_item->priv); 2441 dev_kfree_skb(skb); 2442 } 2443 2444 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a, 2445 const struct mlxsw_event_listener *el_b) 2446 { 2447 return (el_a->func == el_b->func && 2448 el_a->trap_id == el_b->trap_id); 2449 } 2450 2451 static struct mlxsw_event_listener_item * 2452 __find_event_listener_item(struct mlxsw_core *mlxsw_core, 2453 const struct mlxsw_event_listener *el) 2454 { 2455 struct mlxsw_event_listener_item *el_item; 2456 2457 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) { 2458 if (__is_event_listener_equal(&el_item->el, el)) 2459 return el_item; 2460 } 2461 return NULL; 2462 } 2463 2464 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core, 2465 const struct mlxsw_event_listener *el, 2466 void *priv) 2467 { 2468 int err; 2469 struct mlxsw_event_listener_item *el_item; 2470 const struct mlxsw_rx_listener rxl = { 2471 .func =
mlxsw_core_event_listener_func, 2472 .local_port = MLXSW_PORT_DONT_CARE, 2473 .trap_id = el->trap_id, 2474 }; 2475 2476 el_item = __find_event_listener_item(mlxsw_core, el); 2477 if (el_item) 2478 return -EEXIST; 2479 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL); 2480 if (!el_item) 2481 return -ENOMEM; 2482 el_item->mlxsw_core = mlxsw_core; 2483 el_item->el = *el; 2484 el_item->priv = priv; 2485 2486 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true); 2487 if (err) 2488 goto err_rx_listener_register; 2489 2490 /* No reason to save item if we did not manage to register an RX 2491 * listener for it. 2492 */ 2493 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list); 2494 2495 return 0; 2496 2497 err_rx_listener_register: 2498 kfree(el_item); 2499 return err; 2500 } 2501 EXPORT_SYMBOL(mlxsw_core_event_listener_register); 2502 2503 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, 2504 const struct mlxsw_event_listener *el) 2505 { 2506 struct mlxsw_event_listener_item *el_item; 2507 const struct mlxsw_rx_listener rxl = { 2508 .func = mlxsw_core_event_listener_func, 2509 .local_port = MLXSW_PORT_DONT_CARE, 2510 .trap_id = el->trap_id, 2511 }; 2512 2513 el_item = __find_event_listener_item(mlxsw_core, el); 2514 if (!el_item) 2515 return; 2516 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl); 2517 list_del(&el_item->list); 2518 kfree(el_item); 2519 } 2520 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); 2521 2522 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core, 2523 const struct mlxsw_listener *listener, 2524 void *priv, bool enabled) 2525 { 2526 if (listener->is_event) { 2527 WARN_ON(!enabled); 2528 return mlxsw_core_event_listener_register(mlxsw_core, 2529 &listener->event_listener, 2530 priv); 2531 } else { 2532 return mlxsw_core_rx_listener_register(mlxsw_core, 2533 &listener->rx_listener, 2534 priv, enabled); 2535 } 2536 } 2537 2538 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core, 2539 const struct mlxsw_listener *listener, 2540 void *priv) 2541 { 2542 if (listener->is_event) 2543 mlxsw_core_event_listener_unregister(mlxsw_core, 2544 &listener->event_listener); 2545 else 2546 mlxsw_core_rx_listener_unregister(mlxsw_core, 2547 &listener->rx_listener); 2548 } 2549 2550 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core, 2551 const struct mlxsw_listener *listener, void *priv) 2552 { 2553 enum mlxsw_reg_htgt_trap_group trap_group; 2554 enum mlxsw_reg_hpkt_action action; 2555 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2556 int err; 2557 2558 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 2559 return 0; 2560 2561 err = mlxsw_core_listener_register(mlxsw_core, listener, priv, 2562 listener->enabled_on_register); 2563 if (err) 2564 return err; 2565 2566 action = listener->enabled_on_register ? listener->en_action : 2567 listener->dis_action; 2568 trap_group = listener->enabled_on_register ? 
listener->en_trap_group : 2569 listener->dis_trap_group; 2570 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id, 2571 trap_group, listener->is_ctrl); 2572 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); 2573 if (err) 2574 goto err_trap_set; 2575 2576 return 0; 2577 2578 err_trap_set: 2579 mlxsw_core_listener_unregister(mlxsw_core, listener, priv); 2580 return err; 2581 } 2582 EXPORT_SYMBOL(mlxsw_core_trap_register); 2583 2584 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, 2585 const struct mlxsw_listener *listener, 2586 void *priv) 2587 { 2588 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2589 2590 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 2591 return; 2592 2593 if (!listener->is_event) { 2594 mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action, 2595 listener->trap_id, listener->dis_trap_group, 2596 listener->is_ctrl); 2597 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); 2598 } 2599 2600 mlxsw_core_listener_unregister(mlxsw_core, listener, priv); 2601 } 2602 EXPORT_SYMBOL(mlxsw_core_trap_unregister); 2603 2604 int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core, 2605 const struct mlxsw_listener *listeners, 2606 size_t listeners_count, void *priv) 2607 { 2608 int i, err; 2609 2610 for (i = 0; i < listeners_count; i++) { 2611 err = mlxsw_core_trap_register(mlxsw_core, 2612 &listeners[i], 2613 priv); 2614 if (err) 2615 goto err_listener_register; 2616 } 2617 return 0; 2618 2619 err_listener_register: 2620 for (i--; i >= 0; i--) { 2621 mlxsw_core_trap_unregister(mlxsw_core, 2622 &listeners[i], 2623 priv); 2624 } 2625 return err; 2626 } 2627 EXPORT_SYMBOL(mlxsw_core_traps_register); 2628 2629 void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core, 2630 const struct mlxsw_listener *listeners, 2631 size_t listeners_count, void *priv) 2632 { 2633 int i; 2634 2635 for (i = 0; i < listeners_count; i++) { 2636 mlxsw_core_trap_unregister(mlxsw_core, 2637 &listeners[i], 2638 priv); 2639 } 2640 } 2641 EXPORT_SYMBOL(mlxsw_core_traps_unregister); 2642 2643 int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core, 2644 const struct mlxsw_listener *listener, 2645 bool enabled) 2646 { 2647 enum mlxsw_reg_htgt_trap_group trap_group; 2648 enum mlxsw_reg_hpkt_action action; 2649 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2650 int err; 2651 2652 /* Not supported for event listener */ 2653 if (WARN_ON(listener->is_event)) 2654 return -EINVAL; 2655 2656 action = enabled ? listener->en_action : listener->dis_action; 2657 trap_group = enabled ? 
listener->en_trap_group : 2658 listener->dis_trap_group; 2659 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id, 2660 trap_group, listener->is_ctrl); 2661 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); 2662 if (err) 2663 return err; 2664 2665 mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener, 2666 enabled); 2667 return 0; 2668 } 2669 EXPORT_SYMBOL(mlxsw_core_trap_state_set); 2670 2671 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) 2672 { 2673 return atomic64_inc_return(&mlxsw_core->emad.tid); 2674 } 2675 2676 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, 2677 const struct mlxsw_reg_info *reg, 2678 char *payload, 2679 enum mlxsw_core_reg_access_type type, 2680 struct list_head *bulk_list, 2681 mlxsw_reg_trans_cb_t *cb, 2682 unsigned long cb_priv) 2683 { 2684 u64 tid = mlxsw_core_tid_get(mlxsw_core); 2685 struct mlxsw_reg_trans *trans; 2686 int err; 2687 2688 trans = kzalloc(sizeof(*trans), GFP_KERNEL); 2689 if (!trans) 2690 return -ENOMEM; 2691 2692 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, 2693 bulk_list, cb, cb_priv, tid); 2694 if (err) { 2695 kfree_rcu(trans, rcu); 2696 return err; 2697 } 2698 return 0; 2699 } 2700 2701 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, 2702 const struct mlxsw_reg_info *reg, char *payload, 2703 struct list_head *bulk_list, 2704 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) 2705 { 2706 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, 2707 MLXSW_CORE_REG_ACCESS_TYPE_QUERY, 2708 bulk_list, cb, cb_priv); 2709 } 2710 EXPORT_SYMBOL(mlxsw_reg_trans_query); 2711 2712 int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, 2713 const struct mlxsw_reg_info *reg, char *payload, 2714 struct list_head *bulk_list, 2715 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) 2716 { 2717 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, 2718 MLXSW_CORE_REG_ACCESS_TYPE_WRITE, 2719 bulk_list, cb, cb_priv); 2720 } 2721 EXPORT_SYMBOL(mlxsw_reg_trans_write); 2722 2723 #define MLXSW_REG_TRANS_ERR_STRING_SIZE 256 2724 2725 static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) 2726 { 2727 char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE]; 2728 struct mlxsw_core *mlxsw_core = trans->core; 2729 int err; 2730 2731 wait_for_completion(&trans->completion); 2732 cancel_delayed_work_sync(&trans->timeout_dw); 2733 err = trans->err; 2734 2735 if (trans->retries) 2736 dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n", 2737 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); 2738 if (err) { 2739 dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n", 2740 trans->tid, trans->reg->id, 2741 mlxsw_reg_id_str(trans->reg->id), 2742 mlxsw_core_reg_access_type_str(trans->type), 2743 trans->emad_status, 2744 mlxsw_emad_op_tlv_status_str(trans->emad_status)); 2745 2746 snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE, 2747 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid, 2748 trans->reg->id, mlxsw_reg_id_str(trans->reg->id), 2749 mlxsw_emad_op_tlv_status_str(trans->emad_status), 2750 trans->emad_err_string ? 
trans->emad_err_string : ""); 2751 2752 trace_devlink_hwerr(priv_to_devlink(mlxsw_core), 2753 trans->emad_status, err_string); 2754 2755 kfree(trans->emad_err_string); 2756 } 2757 2758 list_del(&trans->bulk_list); 2759 kfree_rcu(trans, rcu); 2760 return err; 2761 } 2762 2763 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list) 2764 { 2765 struct mlxsw_reg_trans *trans; 2766 struct mlxsw_reg_trans *tmp; 2767 int sum_err = 0; 2768 int err; 2769 2770 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) { 2771 err = mlxsw_reg_trans_wait(trans); 2772 if (err && sum_err == 0) 2773 sum_err = err; /* first error to be returned */ 2774 } 2775 return sum_err; 2776 } 2777 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait); 2778 2779 struct mlxsw_core_irq_event_handler_item { 2780 struct list_head list; 2781 void (*cb)(struct mlxsw_core *mlxsw_core); 2782 }; 2783 2784 int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core, 2785 mlxsw_irq_event_cb_t cb) 2786 { 2787 struct mlxsw_core_irq_event_handler_item *item; 2788 2789 item = kzalloc(sizeof(*item), GFP_KERNEL); 2790 if (!item) 2791 return -ENOMEM; 2792 item->cb = cb; 2793 mutex_lock(&mlxsw_core->irq_event_handler_lock); 2794 list_add_tail(&item->list, &mlxsw_core->irq_event_handler_list); 2795 mutex_unlock(&mlxsw_core->irq_event_handler_lock); 2796 return 0; 2797 } 2798 EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register); 2799 2800 void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core, 2801 mlxsw_irq_event_cb_t cb) 2802 { 2803 struct mlxsw_core_irq_event_handler_item *item, *tmp; 2804 2805 mutex_lock(&mlxsw_core->irq_event_handler_lock); 2806 list_for_each_entry_safe(item, tmp, 2807 &mlxsw_core->irq_event_handler_list, list) { 2808 if (item->cb == cb) { 2809 list_del(&item->list); 2810 kfree(item); 2811 } 2812 } 2813 mutex_unlock(&mlxsw_core->irq_event_handler_lock); 2814 } 2815 EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister); 2816 2817 void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core) 2818 { 2819 struct mlxsw_core_irq_event_handler_item *item; 2820 2821 mutex_lock(&mlxsw_core->irq_event_handler_lock); 2822 list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) { 2823 if (item->cb) 2824 item->cb(mlxsw_core); 2825 } 2826 mutex_unlock(&mlxsw_core->irq_event_handler_lock); 2827 } 2828 EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call); 2829 2830 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, 2831 const struct mlxsw_reg_info *reg, 2832 char *payload, 2833 enum mlxsw_core_reg_access_type type) 2834 { 2835 enum mlxsw_emad_op_tlv_status status; 2836 int err, n_retry; 2837 bool reset_ok; 2838 char *in_mbox, *out_mbox, *tmp; 2839 2840 dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", 2841 reg->id, mlxsw_reg_id_str(reg->id), 2842 mlxsw_core_reg_access_type_str(type)); 2843 2844 in_mbox = mlxsw_cmd_mbox_alloc(); 2845 if (!in_mbox) 2846 return -ENOMEM; 2847 2848 out_mbox = mlxsw_cmd_mbox_alloc(); 2849 if (!out_mbox) { 2850 err = -ENOMEM; 2851 goto free_in_mbox; 2852 } 2853 2854 mlxsw_emad_pack_op_tlv(in_mbox, reg, type, 2855 mlxsw_core_tid_get(mlxsw_core)); 2856 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); 2857 mlxsw_emad_pack_reg_tlv(tmp, reg, payload); 2858 2859 /* There is a special treatment needed for MRSR (reset) register. 2860 * The command interface will return error after the command 2861 * is executed, so tell the lower layer to expect it 2862 * and cope accordingly. 
2863 */ 2864 reset_ok = reg->id == MLXSW_REG_MRSR_ID; 2865 2866 n_retry = 0; 2867 retry: 2868 err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox); 2869 if (!err) { 2870 err = mlxsw_emad_process_status(out_mbox, &status); 2871 if (err) { 2872 if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) 2873 goto retry; 2874 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n", 2875 status, mlxsw_emad_op_tlv_status_str(status)); 2876 } 2877 } 2878 2879 if (!err) 2880 memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox), 2881 reg->len); 2882 2883 mlxsw_cmd_mbox_free(out_mbox); 2884 free_in_mbox: 2885 mlxsw_cmd_mbox_free(in_mbox); 2886 if (err) 2887 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n", 2888 reg->id, mlxsw_reg_id_str(reg->id), 2889 mlxsw_core_reg_access_type_str(type)); 2890 return err; 2891 } 2892 2893 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core, 2894 char *payload, size_t payload_len, 2895 unsigned long cb_priv) 2896 { 2897 char *orig_payload = (char *) cb_priv; 2898 2899 memcpy(orig_payload, payload, payload_len); 2900 } 2901 2902 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core, 2903 const struct mlxsw_reg_info *reg, 2904 char *payload, 2905 enum mlxsw_core_reg_access_type type) 2906 { 2907 LIST_HEAD(bulk_list); 2908 int err; 2909 2910 /* During initialization EMAD interface is not available to us, 2911 * so we default to command interface. We switch to EMAD interface 2912 * after setting the appropriate traps. 2913 */ 2914 if (!mlxsw_core->emad.use_emad) 2915 return mlxsw_core_reg_access_cmd(mlxsw_core, reg, 2916 payload, type); 2917 2918 err = mlxsw_core_reg_access_emad(mlxsw_core, reg, 2919 payload, type, &bulk_list, 2920 mlxsw_core_reg_access_cb, 2921 (unsigned long) payload); 2922 if (err) 2923 return err; 2924 return mlxsw_reg_trans_bulk_wait(&bulk_list); 2925 } 2926 2927 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, 2928 const struct mlxsw_reg_info *reg, char *payload) 2929 { 2930 return mlxsw_core_reg_access(mlxsw_core, reg, payload, 2931 MLXSW_CORE_REG_ACCESS_TYPE_QUERY); 2932 } 2933 EXPORT_SYMBOL(mlxsw_reg_query); 2934 2935 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, 2936 const struct mlxsw_reg_info *reg, char *payload) 2937 { 2938 return mlxsw_core_reg_access(mlxsw_core, reg, payload, 2939 MLXSW_CORE_REG_ACCESS_TYPE_WRITE); 2940 } 2941 EXPORT_SYMBOL(mlxsw_reg_write); 2942 2943 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, 2944 struct mlxsw_rx_info *rx_info) 2945 { 2946 struct mlxsw_rx_listener_item *rxl_item; 2947 const struct mlxsw_rx_listener *rxl; 2948 u16 local_port; 2949 bool found = false; 2950 2951 if (rx_info->is_lag) { 2952 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n", 2953 __func__, rx_info->u.lag_id, 2954 rx_info->trap_id); 2955 /* Upper layer does not care if the skb came from LAG or not, 2956 * so just get the local_port for the lag port and push it up. 
2957 */ 2958 local_port = mlxsw_core_lag_mapping_get(mlxsw_core, 2959 rx_info->u.lag_id, 2960 rx_info->lag_port_index); 2961 } else { 2962 local_port = rx_info->u.sys_port; 2963 } 2964 2965 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n", 2966 __func__, local_port, rx_info->trap_id); 2967 2968 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || 2969 (local_port >= mlxsw_core->max_ports)) 2970 goto drop; 2971 2972 rcu_read_lock(); 2973 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) { 2974 rxl = &rxl_item->rxl; 2975 if ((rxl->local_port == MLXSW_PORT_DONT_CARE || 2976 rxl->local_port == local_port) && 2977 rxl->trap_id == rx_info->trap_id && 2978 rxl->mirror_reason == rx_info->mirror_reason) { 2979 if (rxl_item->enabled) 2980 found = true; 2981 break; 2982 } 2983 } 2984 if (!found) { 2985 rcu_read_unlock(); 2986 goto drop; 2987 } 2988 2989 rxl->func(skb, local_port, rxl_item->priv); 2990 rcu_read_unlock(); 2991 return; 2992 2993 drop: 2994 dev_kfree_skb(skb); 2995 } 2996 EXPORT_SYMBOL(mlxsw_core_skb_receive); 2997 2998 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, 2999 u16 lag_id, u8 port_index) 3000 { 3001 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id + 3002 port_index; 3003 } 3004 3005 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, 3006 u16 lag_id, u8 port_index, u16 local_port) 3007 { 3008 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 3009 lag_id, port_index); 3010 3011 mlxsw_core->lag.mapping[index] = local_port; 3012 } 3013 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); 3014 3015 u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, 3016 u16 lag_id, u8 port_index) 3017 { 3018 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 3019 lag_id, port_index); 3020 3021 return mlxsw_core->lag.mapping[index]; 3022 } 3023 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); 3024 3025 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, 3026 u16 lag_id, u16 local_port) 3027 { 3028 int i; 3029 3030 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) { 3031 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 3032 lag_id, i); 3033 3034 if (mlxsw_core->lag.mapping[index] == local_port) 3035 mlxsw_core->lag.mapping[index] = 0; 3036 } 3037 } 3038 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); 3039 3040 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, 3041 enum mlxsw_res_id res_id) 3042 { 3043 return mlxsw_res_valid(&mlxsw_core->res, res_id); 3044 } 3045 EXPORT_SYMBOL(mlxsw_core_res_valid); 3046 3047 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, 3048 enum mlxsw_res_id res_id) 3049 { 3050 return mlxsw_res_get(&mlxsw_core->res, res_id); 3051 } 3052 EXPORT_SYMBOL(mlxsw_core_res_get); 3053 3054 static const struct devlink_port_ops mlxsw_devlink_port_ops = { 3055 .port_split = mlxsw_devlink_port_split, 3056 .port_unsplit = mlxsw_devlink_port_unsplit, 3057 }; 3058 3059 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, 3060 enum devlink_port_flavour flavour, 3061 u8 slot_index, u32 port_number, bool split, 3062 u32 split_port_subnumber, 3063 bool splittable, u32 lanes, 3064 const unsigned char *switch_id, 3065 unsigned char switch_id_len) 3066 { 3067 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3068 struct mlxsw_core_port *mlxsw_core_port = 3069 &mlxsw_core->ports[local_port]; 3070 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3071 struct devlink_port_attrs attrs = {}; 3072 int err; 3073 
3074 attrs.split = split; 3075 attrs.lanes = lanes; 3076 attrs.splittable = splittable; 3077 attrs.flavour = flavour; 3078 attrs.phys.port_number = port_number; 3079 attrs.phys.split_subport_number = split_port_subnumber; 3080 memcpy(attrs.switch_id.id, switch_id, switch_id_len); 3081 attrs.switch_id.id_len = switch_id_len; 3082 mlxsw_core_port->local_port = local_port; 3083 devlink_port_attrs_set(devlink_port, &attrs); 3084 if (slot_index) { 3085 struct mlxsw_linecard *linecard; 3086 3087 linecard = mlxsw_linecard_get(mlxsw_core->linecards, 3088 slot_index); 3089 mlxsw_core_port->linecard = linecard; 3090 devlink_port_linecard_set(devlink_port, 3091 linecard->devlink_linecard); 3092 } 3093 err = devl_port_register_with_ops(devlink, devlink_port, local_port, 3094 &mlxsw_devlink_port_ops); 3095 if (err) 3096 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); 3097 return err; 3098 } 3099 3100 static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) 3101 { 3102 struct mlxsw_core_port *mlxsw_core_port = 3103 &mlxsw_core->ports[local_port]; 3104 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3105 3106 devl_port_unregister(devlink_port); 3107 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); 3108 } 3109 3110 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, 3111 u8 slot_index, u32 port_number, bool split, 3112 u32 split_port_subnumber, 3113 bool splittable, u32 lanes, 3114 const unsigned char *switch_id, 3115 unsigned char switch_id_len) 3116 { 3117 int err; 3118 3119 err = __mlxsw_core_port_init(mlxsw_core, local_port, 3120 DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index, 3121 port_number, split, split_port_subnumber, 3122 splittable, lanes, 3123 switch_id, switch_id_len); 3124 if (err) 3125 return err; 3126 3127 atomic_inc(&mlxsw_core->active_ports_count); 3128 return 0; 3129 } 3130 EXPORT_SYMBOL(mlxsw_core_port_init); 3131 3132 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) 3133 { 3134 atomic_dec(&mlxsw_core->active_ports_count); 3135 3136 __mlxsw_core_port_fini(mlxsw_core, local_port); 3137 } 3138 EXPORT_SYMBOL(mlxsw_core_port_fini); 3139 3140 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, 3141 void *port_driver_priv, 3142 const unsigned char *switch_id, 3143 unsigned char switch_id_len) 3144 { 3145 struct mlxsw_core_port *mlxsw_core_port = 3146 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT]; 3147 int err; 3148 3149 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, 3150 DEVLINK_PORT_FLAVOUR_CPU, 3151 0, 0, false, 0, false, 0, 3152 switch_id, switch_id_len); 3153 if (err) 3154 return err; 3155 3156 mlxsw_core_port->port_driver_priv = port_driver_priv; 3157 return 0; 3158 } 3159 EXPORT_SYMBOL(mlxsw_core_cpu_port_init); 3160 3161 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core) 3162 { 3163 __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT); 3164 } 3165 EXPORT_SYMBOL(mlxsw_core_cpu_port_fini); 3166 3167 void mlxsw_core_port_netdev_link(struct mlxsw_core *mlxsw_core, u16 local_port, 3168 void *port_driver_priv, struct net_device *dev) 3169 { 3170 struct mlxsw_core_port *mlxsw_core_port = 3171 &mlxsw_core->ports[local_port]; 3172 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3173 3174 mlxsw_core_port->port_driver_priv = port_driver_priv; 3175 SET_NETDEV_DEVLINK_PORT(dev, devlink_port); 3176 } 3177 EXPORT_SYMBOL(mlxsw_core_port_netdev_link); 3178 3179 struct devlink_port * 3180 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, 3181 u16 
local_port) 3182 { 3183 struct mlxsw_core_port *mlxsw_core_port = 3184 &mlxsw_core->ports[local_port]; 3185 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3186 3187 return devlink_port; 3188 } 3189 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); 3190 3191 struct mlxsw_linecard * 3192 mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core, 3193 u16 local_port) 3194 { 3195 struct mlxsw_core_port *mlxsw_core_port = 3196 &mlxsw_core->ports[local_port]; 3197 3198 return mlxsw_core_port->linecard; 3199 } 3200 3201 void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, 3202 bool (*selector)(void *priv, u16 local_port), 3203 void *priv) 3204 { 3205 if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected)) 3206 return; 3207 mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv); 3208 } 3209 3210 struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) 3211 { 3212 return mlxsw_core->env; 3213 } 3214 3215 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, 3216 const char *buf, size_t size) 3217 { 3218 __be32 *m = (__be32 *) buf; 3219 int i; 3220 int count = size / sizeof(__be32); 3221 3222 for (i = count - 1; i >= 0; i--) 3223 if (m[i]) 3224 break; 3225 i++; 3226 count = i ? i : 1; 3227 for (i = 0; i < count; i += 4) 3228 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", 3229 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), 3230 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); 3231 } 3232 3233 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, 3234 u32 in_mod, bool out_mbox_direct, bool reset_ok, 3235 char *in_mbox, size_t in_mbox_size, 3236 char *out_mbox, size_t out_mbox_size) 3237 { 3238 u8 status; 3239 int err; 3240 3241 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); 3242 if (!mlxsw_core->bus->cmd_exec) 3243 return -EOPNOTSUPP; 3244 3245 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", 3246 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod); 3247 if (in_mbox) { 3248 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n"); 3249 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size); 3250 } 3251 3252 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode, 3253 opcode_mod, in_mod, out_mbox_direct, 3254 in_mbox, in_mbox_size, 3255 out_mbox, out_mbox_size, &status); 3256 3257 if (!err && out_mbox) { 3258 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); 3259 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); 3260 } 3261 3262 if (reset_ok && err == -EIO && 3263 status == MLXSW_CMD_STATUS_RUNNING_RESET) { 3264 err = 0; 3265 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { 3266 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", 3267 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, 3268 in_mod, status, mlxsw_cmd_status_str(status)); 3269 } else if (err == -ETIMEDOUT) { 3270 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", 3271 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, 3272 in_mod); 3273 } 3274 3275 return err; 3276 } 3277 EXPORT_SYMBOL(mlxsw_cmd_exec); 3278 3279 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) 3280 { 3281 return queue_delayed_work(mlxsw_wq, dwork, delay); 3282 } 3283 EXPORT_SYMBOL(mlxsw_core_schedule_dw); 3284 3285 bool mlxsw_core_schedule_work(struct work_struct *work) 3286 { 3287 return queue_work(mlxsw_owq, work); 
3288 } 3289 EXPORT_SYMBOL(mlxsw_core_schedule_work); 3290 3291 void mlxsw_core_flush_owq(void) 3292 { 3293 flush_workqueue(mlxsw_owq); 3294 } 3295 EXPORT_SYMBOL(mlxsw_core_flush_owq); 3296 3297 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3298 const struct mlxsw_config_profile *profile, 3299 u64 *p_single_size, u64 *p_double_size, 3300 u64 *p_linear_size) 3301 { 3302 struct mlxsw_driver *driver = mlxsw_core->driver; 3303 3304 if (!driver->kvd_sizes_get) 3305 return -EINVAL; 3306 3307 return driver->kvd_sizes_get(mlxsw_core, profile, 3308 p_single_size, p_double_size, 3309 p_linear_size); 3310 } 3311 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); 3312 3313 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, 3314 struct mlxsw_res *res) 3315 { 3316 int index, i; 3317 u64 data; 3318 u16 id; 3319 int err; 3320 3321 mlxsw_cmd_mbox_zero(mbox); 3322 3323 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; 3324 index++) { 3325 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index); 3326 if (err) 3327 return err; 3328 3329 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { 3330 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); 3331 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); 3332 3333 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) 3334 return 0; 3335 3336 mlxsw_res_parse(res, id, data); 3337 } 3338 } 3339 3340 /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get 3341 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. 3342 */ 3343 return -EIO; 3344 } 3345 EXPORT_SYMBOL(mlxsw_core_resources_query); 3346 3347 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core) 3348 { 3349 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv); 3350 } 3351 EXPORT_SYMBOL(mlxsw_core_read_frc_h); 3352 3353 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core) 3354 { 3355 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv); 3356 } 3357 EXPORT_SYMBOL(mlxsw_core_read_frc_l); 3358 3359 u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core) 3360 { 3361 return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv); 3362 } 3363 EXPORT_SYMBOL(mlxsw_core_read_utc_sec); 3364 3365 u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core) 3366 { 3367 return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv); 3368 } 3369 EXPORT_SYMBOL(mlxsw_core_read_utc_nsec); 3370 3371 bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core) 3372 { 3373 return mlxsw_core->driver->sdq_supports_cqe_v2; 3374 } 3375 EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2); 3376 3377 static int __init mlxsw_core_module_init(void) 3378 { 3379 int err; 3380 3381 err = mlxsw_linecard_driver_register(); 3382 if (err) 3383 return err; 3384 3385 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); 3386 if (!mlxsw_wq) { 3387 err = -ENOMEM; 3388 goto err_alloc_workqueue; 3389 } 3390 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, 3391 mlxsw_core_driver_name); 3392 if (!mlxsw_owq) { 3393 err = -ENOMEM; 3394 goto err_alloc_ordered_workqueue; 3395 } 3396 return 0; 3397 3398 err_alloc_ordered_workqueue: 3399 destroy_workqueue(mlxsw_wq); 3400 err_alloc_workqueue: 3401 mlxsw_linecard_driver_unregister(); 3402 return err; 3403 } 3404 3405 static void __exit mlxsw_core_module_exit(void) 3406 { 3407 destroy_workqueue(mlxsw_owq); 3408 destroy_workqueue(mlxsw_wq); 3409 mlxsw_linecard_driver_unregister(); 3410 } 3411 3412 module_init(mlxsw_core_module_init); 3413 module_exit(mlxsw_core_module_exit); 3414 3415 MODULE_LICENSE("Dual BSD/GPL"); 3416 
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 3417 MODULE_DESCRIPTION("Mellanox switch device core driver"); 3418
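/* Usage sketch (illustrative only, not part of the driver): a switch driver
 * built on top of this core would typically access device registers through
 * the exported EMAD helpers and attach packet listeners through the trap API
 * above. The PMLP register helpers come from reg.h; the listener definition,
 * "my_trap_func" and "MY_TRAP_ID" are hypothetical placeholders and the
 * MLXSW_RXL() argument list is abbreviated.
 *
 *	static void my_trap_func(struct sk_buff *skb, u16 local_port,
 *				 void *priv)
 *	{
 *		... consume the trapped packet, then free it ...
 *	}
 *
 *	static const struct mlxsw_listener my_listener =
 *		MLXSW_RXL(my_trap_func, MY_TRAP_ID, TRAP_TO_CPU, ...);
 *
 *	int my_init(struct mlxsw_core *mlxsw_core, u16 local_port, void *priv)
 *	{
 *		char pmlp_pl[MLXSW_REG_PMLP_LEN];
 *		int err;
 *
 *		err = mlxsw_core_trap_register(mlxsw_core, &my_listener, priv);
 *		if (err)
 *			return err;
 *
 *		mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 *		return mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 *	}
 */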