1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/device.h> 7 #include <linux/export.h> 8 #include <linux/err.h> 9 #include <linux/if_link.h> 10 #include <linux/netdevice.h> 11 #include <linux/completion.h> 12 #include <linux/skbuff.h> 13 #include <linux/etherdevice.h> 14 #include <linux/types.h> 15 #include <linux/string.h> 16 #include <linux/gfp.h> 17 #include <linux/random.h> 18 #include <linux/jiffies.h> 19 #include <linux/mutex.h> 20 #include <linux/rcupdate.h> 21 #include <linux/slab.h> 22 #include <linux/workqueue.h> 23 #include <linux/firmware.h> 24 #include <asm/byteorder.h> 25 #include <net/devlink.h> 26 #include <trace/events/devlink.h> 27 28 #include "core.h" 29 #include "core_env.h" 30 #include "item.h" 31 #include "cmd.h" 32 #include "port.h" 33 #include "trap.h" 34 #include "emad.h" 35 #include "reg.h" 36 #include "resources.h" 37 #include "../mlxfw/mlxfw.h" 38 #include "txheader.h" 39 40 static LIST_HEAD(mlxsw_core_driver_list); 41 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); 42 43 static const char mlxsw_core_driver_name[] = "mlxsw_core"; 44 45 static struct workqueue_struct *mlxsw_wq; 46 static struct workqueue_struct *mlxsw_owq; 47 48 struct mlxsw_core_port { 49 struct devlink_port devlink_port; 50 void *port_driver_priv; 51 u16 local_port; 52 struct mlxsw_linecard *linecard; 53 }; 54 55 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) 56 { 57 return mlxsw_core_port->port_driver_priv; 58 } 59 EXPORT_SYMBOL(mlxsw_core_port_driver_priv); 60 61 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port) 62 { 63 return mlxsw_core_port->port_driver_priv != NULL; 64 } 65 66 struct mlxsw_core { 67 struct mlxsw_driver *driver; 68 const struct mlxsw_bus *bus; 69 void *bus_priv; 70 const struct mlxsw_bus_info *bus_info; 71 struct workqueue_struct *emad_wq; 
72 struct list_head rx_listener_list; 73 struct list_head event_listener_list; 74 struct list_head irq_event_handler_list; 75 struct mutex irq_event_handler_lock; /* Locks access to handlers list */ 76 struct { 77 atomic64_t tid; 78 struct list_head trans_list; 79 spinlock_t trans_list_lock; /* protects trans_list writes */ 80 bool use_emad; 81 bool enable_string_tlv; 82 bool enable_latency_tlv; 83 } emad; 84 struct { 85 u16 *mapping; /* lag_id+port_index to local_port mapping */ 86 } lag; 87 struct mlxsw_res res; 88 struct mlxsw_hwmon *hwmon; 89 struct mlxsw_thermal *thermal; 90 struct mlxsw_linecards *linecards; 91 struct mlxsw_core_port *ports; 92 unsigned int max_ports; 93 atomic_t active_ports_count; 94 bool fw_flash_in_progress; 95 struct { 96 struct devlink_health_reporter *fw_fatal; 97 } health; 98 struct mlxsw_env *env; 99 unsigned long driver_priv[]; 100 /* driver_priv has to be always the last item */ 101 }; 102 103 struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core) 104 { 105 return mlxsw_core->linecards; 106 } 107 108 void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core, 109 struct mlxsw_linecards *linecards) 110 { 111 mlxsw_core->linecards = linecards; 112 } 113 114 #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40 115 116 static u64 mlxsw_ports_occ_get(void *priv) 117 { 118 struct mlxsw_core *mlxsw_core = priv; 119 120 return atomic_read(&mlxsw_core->active_ports_count); 121 } 122 123 static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core) 124 { 125 struct devlink *devlink = priv_to_devlink(mlxsw_core); 126 struct devlink_resource_size_params ports_num_params; 127 u32 max_ports; 128 129 max_ports = mlxsw_core->max_ports - 1; 130 devlink_resource_size_params_init(&ports_num_params, max_ports, 131 max_ports, 1, 132 DEVLINK_RESOURCE_UNIT_ENTRY); 133 134 return devl_resource_register(devlink, 135 DEVLINK_RESOURCE_GENERIC_NAME_PORTS, 136 max_ports, MLXSW_CORE_RESOURCE_PORTS, 137 DEVLINK_RESOURCE_ID_PARENT_TOP, 
138 &ports_num_params); 139 } 140 141 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload) 142 { 143 struct devlink *devlink = priv_to_devlink(mlxsw_core); 144 int err; 145 146 /* Switch ports are numbered from 1 to queried value */ 147 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT)) 148 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core, 149 MAX_SYSTEM_PORT) + 1; 150 else 151 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1; 152 153 mlxsw_core->ports = kcalloc(mlxsw_core->max_ports, 154 sizeof(struct mlxsw_core_port), GFP_KERNEL); 155 if (!mlxsw_core->ports) 156 return -ENOMEM; 157 158 if (!reload) { 159 err = mlxsw_core_resources_ports_register(mlxsw_core); 160 if (err) 161 goto err_resources_ports_register; 162 } 163 atomic_set(&mlxsw_core->active_ports_count, 0); 164 devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS, 165 mlxsw_ports_occ_get, mlxsw_core); 166 167 return 0; 168 169 err_resources_ports_register: 170 kfree(mlxsw_core->ports); 171 return err; 172 } 173 174 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload) 175 { 176 struct devlink *devlink = priv_to_devlink(mlxsw_core); 177 178 devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS); 179 if (!reload) 180 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 181 182 kfree(mlxsw_core->ports); 183 } 184 185 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core) 186 { 187 return mlxsw_core->max_ports; 188 } 189 EXPORT_SYMBOL(mlxsw_core_max_ports); 190 191 int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag) 192 { 193 struct mlxsw_driver *driver = mlxsw_core->driver; 194 195 if (driver->profile->used_max_lag) { 196 *p_max_lag = driver->profile->max_lag; 197 return 0; 198 } 199 200 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG)) 201 return -EIO; 202 203 *p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG); 204 return 0; 205 } 206 EXPORT_SYMBOL(mlxsw_core_max_lag); 207 208 enum 
mlxsw_cmd_mbox_config_profile_lag_mode 209 mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core) 210 { 211 return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv); 212 } 213 EXPORT_SYMBOL(mlxsw_core_lag_mode); 214 215 enum mlxsw_cmd_mbox_config_profile_flood_mode 216 mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core) 217 { 218 return mlxsw_core->bus->flood_mode(mlxsw_core->bus_priv); 219 } 220 EXPORT_SYMBOL(mlxsw_core_flood_mode); 221 222 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) 223 { 224 return mlxsw_core->driver_priv; 225 } 226 EXPORT_SYMBOL(mlxsw_core_driver_priv); 227 228 bool 229 mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev, 230 const struct mlxsw_fw_rev *req_rev) 231 { 232 return rev->minor > req_rev->minor || 233 (rev->minor == req_rev->minor && 234 rev->subminor >= req_rev->subminor); 235 } 236 EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate); 237 238 struct mlxsw_rx_listener_item { 239 struct list_head list; 240 struct mlxsw_rx_listener rxl; 241 void *priv; 242 bool enabled; 243 }; 244 245 struct mlxsw_event_listener_item { 246 struct list_head list; 247 struct mlxsw_core *mlxsw_core; 248 struct mlxsw_event_listener el; 249 void *priv; 250 }; 251 252 static const u8 mlxsw_core_trap_groups[] = { 253 MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 254 MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT, 255 }; 256 257 static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core) 258 { 259 char htgt_pl[MLXSW_REG_HTGT_LEN]; 260 int err; 261 int i; 262 263 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 264 return 0; 265 266 for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) { 267 mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i], 268 MLXSW_REG_HTGT_INVALID_POLICER, 269 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 270 MLXSW_REG_HTGT_DEFAULT_TC); 271 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 272 if (err) 273 return err; 274 } 275 return 0; 276 } 277 278 /****************** 279 * EMAD processing 280 
******************/ 281 282 /* emad_eth_hdr_dmac 283 * Destination MAC in EMAD's Ethernet header. 284 * Must be set to 01:02:c9:00:00:01 285 */ 286 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6); 287 288 /* emad_eth_hdr_smac 289 * Source MAC in EMAD's Ethernet header. 290 * Must be set to 00:02:c9:01:02:03 291 */ 292 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6); 293 294 /* emad_eth_hdr_ethertype 295 * Ethertype in EMAD's Ethernet header. 296 * Must be set to 0x8932 297 */ 298 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16); 299 300 /* emad_eth_hdr_mlx_proto 301 * Mellanox protocol. 302 * Must be set to 0x0. 303 */ 304 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8); 305 306 /* emad_eth_hdr_ver 307 * Mellanox protocol version. 308 * Must be set to 0x0. 309 */ 310 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4); 311 312 /* emad_op_tlv_type 313 * Type of the TLV. 314 * Must be set to 0x1 (operation TLV). 315 */ 316 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5); 317 318 /* emad_op_tlv_len 319 * Length of the operation TLV in u32. 320 * Must be set to 0x4. 321 */ 322 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11); 323 324 /* emad_op_tlv_dr 325 * Direct route bit. Setting to 1 indicates the EMAD is a direct route 326 * EMAD. DR TLV must follow. 327 * 328 * Note: Currently not supported and must not be set. 329 */ 330 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1); 331 332 /* emad_op_tlv_status 333 * Returned status in case of EMAD response. Must be set to 0 in case 334 * of EMAD request. 335 * 0x0 - success 336 * 0x1 - device is busy. Requester should retry 337 * 0x2 - Mellanox protocol version not supported 338 * 0x3 - unknown TLV 339 * 0x4 - register not supported 340 * 0x5 - operation class not supported 341 * 0x6 - EMAD method not supported 342 * 0x7 - bad parameter (e.g. port out of range) 343 * 0x8 - resource not available 344 * 0x9 - message receipt acknowledgment. 
Requester should retry 345 * 0x70 - internal error 346 */ 347 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7); 348 349 /* emad_op_tlv_register_id 350 * Register ID of register within register TLV. 351 */ 352 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16); 353 354 /* emad_op_tlv_r 355 * Response bit. Setting to 1 indicates Response, otherwise request. 356 */ 357 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1); 358 359 /* emad_op_tlv_method 360 * EMAD method type. 361 * 0x1 - query 362 * 0x2 - write 363 * 0x3 - send (currently not supported) 364 * 0x4 - event 365 */ 366 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7); 367 368 /* emad_op_tlv_class 369 * EMAD operation class. Must be set to 0x1 (REG_ACCESS). 370 */ 371 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8); 372 373 /* emad_op_tlv_tid 374 * EMAD transaction ID. Used for pairing request and response EMADs. 375 */ 376 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64); 377 378 /* emad_string_tlv_type 379 * Type of the TLV. 380 * Must be set to 0x2 (string TLV). 381 */ 382 MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5); 383 384 /* emad_string_tlv_len 385 * Length of the string TLV in u32. 386 */ 387 MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11); 388 389 #define MLXSW_EMAD_STRING_TLV_STRING_LEN 128 390 391 /* emad_string_tlv_string 392 * String provided by the device's firmware in case of erroneous register access 393 */ 394 MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04, 395 MLXSW_EMAD_STRING_TLV_STRING_LEN); 396 397 /* emad_latency_tlv_type 398 * Type of the TLV. 399 * Must be set to 0x4 (latency TLV). 400 */ 401 MLXSW_ITEM32(emad, latency_tlv, type, 0x00, 27, 5); 402 403 /* emad_latency_tlv_len 404 * Length of the latency TLV in u32. 405 */ 406 MLXSW_ITEM32(emad, latency_tlv, len, 0x00, 16, 11); 407 408 /* emad_latency_tlv_latency_time 409 * EMAD latency time in units of uSec. 410 */ 411 MLXSW_ITEM32(emad, latency_tlv, latency_time, 0x04, 0, 32); 412 413 /* emad_reg_tlv_type 414 * Type of the TLV. 
415 * Must be set to 0x3 (register TLV). 416 */ 417 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5); 418 419 /* emad_reg_tlv_len 420 * Length of the operation TLV in u32. 421 */ 422 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11); 423 424 /* emad_end_tlv_type 425 * Type of the TLV. 426 * Must be set to 0x0 (end TLV). 427 */ 428 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5); 429 430 /* emad_end_tlv_len 431 * Length of the end TLV in u32. 432 * Must be set to 1. 433 */ 434 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11); 435 436 enum mlxsw_core_reg_access_type { 437 MLXSW_CORE_REG_ACCESS_TYPE_QUERY, 438 MLXSW_CORE_REG_ACCESS_TYPE_WRITE, 439 }; 440 441 static inline const char * 442 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type) 443 { 444 switch (type) { 445 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY: 446 return "query"; 447 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE: 448 return "write"; 449 } 450 BUG(); 451 } 452 453 static void mlxsw_emad_pack_end_tlv(char *end_tlv) 454 { 455 mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END); 456 mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN); 457 } 458 459 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, 460 const struct mlxsw_reg_info *reg, 461 char *payload) 462 { 463 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG); 464 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1); 465 memcpy(reg_tlv + sizeof(u32), payload, reg->len); 466 } 467 468 static void mlxsw_emad_pack_string_tlv(char *string_tlv) 469 { 470 mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING); 471 mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN); 472 } 473 474 static void mlxsw_emad_pack_op_tlv(char *op_tlv, 475 const struct mlxsw_reg_info *reg, 476 enum mlxsw_core_reg_access_type type, 477 u64 tid) 478 { 479 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); 480 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); 481 mlxsw_emad_op_tlv_dr_set(op_tlv, 
0); 482 mlxsw_emad_op_tlv_status_set(op_tlv, 0); 483 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id); 484 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST); 485 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY) 486 mlxsw_emad_op_tlv_method_set(op_tlv, 487 MLXSW_EMAD_OP_TLV_METHOD_QUERY); 488 else 489 mlxsw_emad_op_tlv_method_set(op_tlv, 490 MLXSW_EMAD_OP_TLV_METHOD_WRITE); 491 mlxsw_emad_op_tlv_class_set(op_tlv, 492 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); 493 mlxsw_emad_op_tlv_tid_set(op_tlv, tid); 494 } 495 496 static void mlxsw_emad_pack_latency_tlv(char *latency_tlv) 497 { 498 mlxsw_emad_latency_tlv_type_set(latency_tlv, MLXSW_EMAD_TLV_TYPE_LATENCY); 499 mlxsw_emad_latency_tlv_len_set(latency_tlv, MLXSW_EMAD_LATENCY_TLV_LEN); 500 } 501 502 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) 503 { 504 char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN); 505 506 mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC); 507 mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC); 508 mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE); 509 mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO); 510 mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION); 511 512 skb_reset_mac_header(skb); 513 514 return 0; 515 } 516 517 static void mlxsw_emad_construct(const struct mlxsw_core *mlxsw_core, 518 struct sk_buff *skb, 519 const struct mlxsw_reg_info *reg, 520 char *payload, 521 enum mlxsw_core_reg_access_type type, u64 tid) 522 { 523 char *buf; 524 525 buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32)); 526 mlxsw_emad_pack_end_tlv(buf); 527 528 buf = skb_push(skb, reg->len + sizeof(u32)); 529 mlxsw_emad_pack_reg_tlv(buf, reg, payload); 530 531 if (mlxsw_core->emad.enable_latency_tlv) { 532 buf = skb_push(skb, MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32)); 533 mlxsw_emad_pack_latency_tlv(buf); 534 } 535 536 if (mlxsw_core->emad.enable_string_tlv) { 537 buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * 
sizeof(u32)); 538 mlxsw_emad_pack_string_tlv(buf); 539 } 540 541 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); 542 mlxsw_emad_pack_op_tlv(buf, reg, type, tid); 543 544 mlxsw_emad_construct_eth_hdr(skb); 545 } 546 547 struct mlxsw_emad_tlv_offsets { 548 u16 op_tlv; 549 u16 string_tlv; 550 u16 latency_tlv; 551 u16 reg_tlv; 552 }; 553 554 static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv) 555 { 556 u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv); 557 558 return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING; 559 } 560 561 static bool mlxsw_emad_tlv_is_latency_tlv(const char *tlv) 562 { 563 u8 tlv_type = mlxsw_emad_latency_tlv_type_get(tlv); 564 565 return tlv_type == MLXSW_EMAD_TLV_TYPE_LATENCY; 566 } 567 568 static void mlxsw_emad_tlv_parse(struct sk_buff *skb) 569 { 570 struct mlxsw_emad_tlv_offsets *offsets = 571 (struct mlxsw_emad_tlv_offsets *) skb->cb; 572 573 offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN; 574 offsets->string_tlv = 0; 575 offsets->latency_tlv = 0; 576 577 offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN + 578 MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); 579 580 /* If string TLV is present, it must come after the operation TLV. 
*/ 581 if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) { 582 offsets->string_tlv = offsets->reg_tlv; 583 offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); 584 } 585 586 if (mlxsw_emad_tlv_is_latency_tlv(skb->data + offsets->reg_tlv)) { 587 offsets->latency_tlv = offsets->reg_tlv; 588 offsets->reg_tlv += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32); 589 } 590 } 591 592 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb) 593 { 594 struct mlxsw_emad_tlv_offsets *offsets = 595 (struct mlxsw_emad_tlv_offsets *) skb->cb; 596 597 return ((char *) (skb->data + offsets->op_tlv)); 598 } 599 600 static char *mlxsw_emad_string_tlv(const struct sk_buff *skb) 601 { 602 struct mlxsw_emad_tlv_offsets *offsets = 603 (struct mlxsw_emad_tlv_offsets *) skb->cb; 604 605 if (!offsets->string_tlv) 606 return NULL; 607 608 return ((char *) (skb->data + offsets->string_tlv)); 609 } 610 611 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb) 612 { 613 struct mlxsw_emad_tlv_offsets *offsets = 614 (struct mlxsw_emad_tlv_offsets *) skb->cb; 615 616 return ((char *) (skb->data + offsets->reg_tlv)); 617 } 618 619 static char *mlxsw_emad_reg_payload(const char *reg_tlv) 620 { 621 return ((char *) (reg_tlv + sizeof(u32))); 622 } 623 624 static char *mlxsw_emad_reg_payload_cmd(const char *mbox) 625 { 626 return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); 627 } 628 629 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb) 630 { 631 char *op_tlv; 632 633 op_tlv = mlxsw_emad_op_tlv(skb); 634 return mlxsw_emad_op_tlv_tid_get(op_tlv); 635 } 636 637 static bool mlxsw_emad_is_resp(const struct sk_buff *skb) 638 { 639 char *op_tlv; 640 641 op_tlv = mlxsw_emad_op_tlv(skb); 642 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE); 643 } 644 645 static int mlxsw_emad_process_status(char *op_tlv, 646 enum mlxsw_emad_op_tlv_status *p_status) 647 { 648 *p_status = mlxsw_emad_op_tlv_status_get(op_tlv); 649 650 switch (*p_status) { 651 
case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: 652 return 0; 653 case MLXSW_EMAD_OP_TLV_STATUS_BUSY: 654 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: 655 return -EAGAIN; 656 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: 657 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: 658 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: 659 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: 660 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: 661 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: 662 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: 663 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: 664 default: 665 return -EIO; 666 } 667 } 668 669 static int 670 mlxsw_emad_process_status_skb(struct sk_buff *skb, 671 enum mlxsw_emad_op_tlv_status *p_status) 672 { 673 return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status); 674 } 675 676 struct mlxsw_reg_trans { 677 struct list_head list; 678 struct list_head bulk_list; 679 struct mlxsw_core *core; 680 struct sk_buff *tx_skb; 681 struct mlxsw_txhdr_info txhdr_info; 682 struct delayed_work timeout_dw; 683 unsigned int retries; 684 u64 tid; 685 struct completion completion; 686 atomic_t active; 687 mlxsw_reg_trans_cb_t *cb; 688 unsigned long cb_priv; 689 const struct mlxsw_reg_info *reg; 690 enum mlxsw_core_reg_access_type type; 691 int err; 692 char *emad_err_string; 693 enum mlxsw_emad_op_tlv_status emad_status; 694 struct rcu_head rcu; 695 }; 696 697 static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb, 698 struct mlxsw_reg_trans *trans) 699 { 700 char *string_tlv; 701 char *string; 702 703 string_tlv = mlxsw_emad_string_tlv(skb); 704 if (!string_tlv) 705 return; 706 707 trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN, 708 GFP_ATOMIC); 709 if (!trans->emad_err_string) 710 return; 711 712 string = mlxsw_emad_string_tlv_string_data(string_tlv); 713 strscpy(trans->emad_err_string, string, 714 MLXSW_EMAD_STRING_TLV_STRING_LEN); 715 } 716 717 #define 
MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000 718 #define MLXSW_EMAD_TIMEOUT_MS 200 719 720 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) 721 { 722 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 723 724 if (trans->core->fw_flash_in_progress) 725 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS); 726 727 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, 728 timeout << trans->retries); 729 } 730 731 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 732 struct mlxsw_reg_trans *trans) 733 { 734 struct sk_buff *skb; 735 int err; 736 737 skb = skb_clone(trans->tx_skb, GFP_KERNEL); 738 if (!skb) 739 return -ENOMEM; 740 741 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data, 742 skb->len); 743 744 atomic_set(&trans->active, 1); 745 err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info); 746 if (err) { 747 dev_kfree_skb(skb); 748 return err; 749 } 750 mlxsw_emad_trans_timeout_schedule(trans); 751 return 0; 752 } 753 754 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err) 755 { 756 struct mlxsw_core *mlxsw_core = trans->core; 757 758 dev_kfree_skb(trans->tx_skb); 759 spin_lock_bh(&mlxsw_core->emad.trans_list_lock); 760 list_del_rcu(&trans->list); 761 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); 762 trans->err = err; 763 complete(&trans->completion); 764 } 765 766 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, 767 struct mlxsw_reg_trans *trans) 768 { 769 int err; 770 771 if (trans->retries < MLXSW_EMAD_MAX_RETRY) { 772 trans->retries++; 773 err = mlxsw_emad_transmit(trans->core, trans); 774 if (err == 0) 775 return; 776 777 if (!atomic_dec_and_test(&trans->active)) 778 return; 779 } else { 780 err = -EIO; 781 } 782 mlxsw_emad_trans_finish(trans, err); 783 } 784 785 static void mlxsw_emad_trans_timeout_work(struct work_struct *work) 786 { 787 struct mlxsw_reg_trans *trans = container_of(work, 788 struct 
mlxsw_reg_trans, 789 timeout_dw.work); 790 791 if (!atomic_dec_and_test(&trans->active)) 792 return; 793 794 mlxsw_emad_transmit_retry(trans->core, trans); 795 } 796 797 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, 798 struct mlxsw_reg_trans *trans, 799 struct sk_buff *skb) 800 { 801 int err; 802 803 if (!atomic_dec_and_test(&trans->active)) 804 return; 805 806 err = mlxsw_emad_process_status_skb(skb, &trans->emad_status); 807 if (err == -EAGAIN) { 808 mlxsw_emad_transmit_retry(mlxsw_core, trans); 809 } else { 810 if (err == 0) { 811 char *reg_tlv = mlxsw_emad_reg_tlv(skb); 812 813 if (trans->cb) 814 trans->cb(mlxsw_core, 815 mlxsw_emad_reg_payload(reg_tlv), 816 trans->reg->len, trans->cb_priv); 817 } else { 818 mlxsw_emad_process_string_tlv(skb, trans); 819 } 820 mlxsw_emad_trans_finish(trans, err); 821 } 822 } 823 824 /* called with rcu read lock held */ 825 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port, 826 void *priv) 827 { 828 struct mlxsw_core *mlxsw_core = priv; 829 struct mlxsw_reg_trans *trans; 830 831 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0, 832 skb->data, skb->len); 833 834 mlxsw_emad_tlv_parse(skb); 835 836 if (!mlxsw_emad_is_resp(skb)) 837 goto free_skb; 838 839 list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) { 840 if (mlxsw_emad_get_tid(skb) == trans->tid) { 841 mlxsw_emad_process_response(mlxsw_core, trans, skb); 842 break; 843 } 844 } 845 846 free_skb: 847 dev_kfree_skb(skb); 848 } 849 850 static const struct mlxsw_listener mlxsw_emad_rx_listener = 851 MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false, 852 EMAD, FORWARD); 853 854 static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core) 855 { 856 char mgir_pl[MLXSW_REG_MGIR_LEN]; 857 bool string_tlv, latency_tlv; 858 int err; 859 860 mlxsw_reg_mgir_pack(mgir_pl); 861 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl); 862 if (err) 863 return err; 864 865 string_tlv = 
mlxsw_reg_mgir_fw_info_string_tlv_get(mgir_pl); 866 mlxsw_core->emad.enable_string_tlv = string_tlv; 867 868 latency_tlv = mlxsw_reg_mgir_fw_info_latency_tlv_get(mgir_pl); 869 mlxsw_core->emad.enable_latency_tlv = latency_tlv; 870 871 return 0; 872 } 873 874 static void mlxsw_emad_tlv_disable(struct mlxsw_core *mlxsw_core) 875 { 876 mlxsw_core->emad.enable_latency_tlv = false; 877 mlxsw_core->emad.enable_string_tlv = false; 878 } 879 880 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) 881 { 882 struct workqueue_struct *emad_wq; 883 u64 tid; 884 int err; 885 886 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 887 return 0; 888 889 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); 890 if (!emad_wq) 891 return -ENOMEM; 892 mlxsw_core->emad_wq = emad_wq; 893 894 /* Set the upper 32 bits of the transaction ID field to a random 895 * number. This allows us to discard EMADs addressed to other 896 * devices. 897 */ 898 get_random_bytes(&tid, 4); 899 tid <<= 32; 900 atomic64_set(&mlxsw_core->emad.tid, tid); 901 902 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list); 903 spin_lock_init(&mlxsw_core->emad.trans_list_lock); 904 905 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, 906 mlxsw_core); 907 if (err) 908 goto err_trap_register; 909 910 err = mlxsw_emad_tlv_enable(mlxsw_core); 911 if (err) 912 goto err_emad_tlv_enable; 913 914 mlxsw_core->emad.use_emad = true; 915 916 return 0; 917 918 err_emad_tlv_enable: 919 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 920 mlxsw_core); 921 err_trap_register: 922 destroy_workqueue(mlxsw_core->emad_wq); 923 return err; 924 } 925 926 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) 927 { 928 929 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 930 return; 931 932 mlxsw_core->emad.use_emad = false; 933 mlxsw_emad_tlv_disable(mlxsw_core); 934 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 935 mlxsw_core); 936 destroy_workqueue(mlxsw_core->emad_wq); 937 } 938 
939 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, 940 u16 reg_len) 941 { 942 struct sk_buff *skb; 943 u16 emad_len; 944 945 emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + 946 (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * 947 sizeof(u32) + MLXSW_TXHDR_LEN); 948 if (mlxsw_core->emad.enable_string_tlv) 949 emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); 950 if (mlxsw_core->emad.enable_latency_tlv) 951 emad_len += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32); 952 if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN) 953 return NULL; 954 955 skb = netdev_alloc_skb(NULL, emad_len); 956 if (!skb) 957 return NULL; 958 memset(skb->data, 0, emad_len); 959 skb_reserve(skb, emad_len); 960 961 return skb; 962 } 963 964 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, 965 const struct mlxsw_reg_info *reg, 966 char *payload, 967 enum mlxsw_core_reg_access_type type, 968 struct mlxsw_reg_trans *trans, 969 struct list_head *bulk_list, 970 mlxsw_reg_trans_cb_t *cb, 971 unsigned long cb_priv, u64 tid) 972 { 973 struct sk_buff *skb; 974 int err; 975 976 dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", 977 tid, reg->id, mlxsw_reg_id_str(reg->id), 978 mlxsw_core_reg_access_type_str(type)); 979 980 skb = mlxsw_emad_alloc(mlxsw_core, reg->len); 981 if (!skb) 982 return -ENOMEM; 983 984 list_add_tail(&trans->bulk_list, bulk_list); 985 trans->core = mlxsw_core; 986 trans->tx_skb = skb; 987 trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT; 988 trans->txhdr_info.tx_info.is_emad = true; 989 INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work); 990 trans->tid = tid; 991 init_completion(&trans->completion); 992 trans->cb = cb; 993 trans->cb_priv = cb_priv; 994 trans->reg = reg; 995 trans->type = type; 996 997 mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid); 998 999 spin_lock_bh(&mlxsw_core->emad.trans_list_lock); 1000 list_add_tail_rcu(&trans->list, 
&mlxsw_core->emad.trans_list); 1001 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); 1002 err = mlxsw_emad_transmit(mlxsw_core, trans); 1003 if (err) 1004 goto err_out; 1005 return 0; 1006 1007 err_out: 1008 spin_lock_bh(&mlxsw_core->emad.trans_list_lock); 1009 list_del_rcu(&trans->list); 1010 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); 1011 list_del(&trans->bulk_list); 1012 dev_kfree_skb(trans->tx_skb); 1013 return err; 1014 } 1015 1016 /***************** 1017 * Core functions 1018 *****************/ 1019 1020 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) 1021 { 1022 spin_lock(&mlxsw_core_driver_list_lock); 1023 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list); 1024 spin_unlock(&mlxsw_core_driver_list_lock); 1025 return 0; 1026 } 1027 EXPORT_SYMBOL(mlxsw_core_driver_register); 1028 1029 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver) 1030 { 1031 spin_lock(&mlxsw_core_driver_list_lock); 1032 list_del(&mlxsw_driver->list); 1033 spin_unlock(&mlxsw_core_driver_list_lock); 1034 } 1035 EXPORT_SYMBOL(mlxsw_core_driver_unregister); 1036 1037 static struct mlxsw_driver *__driver_find(const char *kind) 1038 { 1039 struct mlxsw_driver *mlxsw_driver; 1040 1041 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) { 1042 if (strcmp(mlxsw_driver->kind, kind) == 0) 1043 return mlxsw_driver; 1044 } 1045 return NULL; 1046 } 1047 1048 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind) 1049 { 1050 struct mlxsw_driver *mlxsw_driver; 1051 1052 spin_lock(&mlxsw_core_driver_list_lock); 1053 mlxsw_driver = __driver_find(kind); 1054 spin_unlock(&mlxsw_core_driver_list_lock); 1055 return mlxsw_driver; 1056 } 1057 1058 int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, 1059 struct mlxfw_dev *mlxfw_dev, 1060 const struct firmware *firmware, 1061 struct netlink_ext_ack *extack) 1062 { 1063 int err; 1064 1065 mlxsw_core->fw_flash_in_progress = true; 1066 err = mlxfw_firmware_flash(mlxfw_dev, firmware, 
				   extack);
	mlxsw_core->fw_flash_in_progress = false;

	return err;
}

/* Adapter tying a mlxfw_dev (mlxfw library handle) back to its core. */
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};

/* mlxfw op: query a firmware component's size/alignment constraints via
 * the MCQI register.
 */
static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);

	/* Clamp to what the MCDA download register can carry per write. */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw op: acquire the firmware-update FSM lock. Fails with -EBUSY if
 * the FSM is not idle (another update in progress).
 */
static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw op: announce the start of a component update of the given size. */
static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index, u32 component_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw op: download one block of component data via the MCDA register. */
static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					    u8 *data, u16 size, u32 offset)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw op: ask firmware to verify the downloaded component. */
static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					      u16 component_index)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw op: activate the flashed firmware image. */
static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw op: read back the FSM state and map the error code into the
 * mlxfw error enum (clamped to MLXFW_FSM_STATE_ERR_MAX).
 */
static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					 enum mlxfw_fsm_state *fsm_state,
					 enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw op: cancel an in-progress update. Best-effort; write status is
 * intentionally ignored since this runs on error paths.
 */
static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw op: release the update handle acquired by fsm_lock. Best-effort. */
static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}

/* Implementation of the mlxfw library callbacks in terms of MCQI/MCC/MCDA
 * register accesses.
 */
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};

/* Flash the given firmware image on this device, building a temporary
 * on-stack mlxfw_dev bound to our register-based ops.
 */
static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_core_fw_info mlxsw_core_fw_info = {
		.mlxfw_dev = {
			.ops = &mlxsw_core_fw_mlxsw_dev_ops,
			.psid = mlxsw_core->bus_info->psid,
			.psid_size = strlen(mlxsw_core->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_core),
		},
		.mlxsw_core = mlxsw_core
	};

	return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
				   firmware, extack);
}

/* Check that the running firmware satisfies the driver's minimum version
 * and auto-flash the bundled image if it does not. Returns -EAGAIN after a
 * successful flash when the current FW supports reset, so the caller knows
 * a reset is required.
 */
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_bus_info *mlxsw_bus_info,
				      const struct mlxsw_fw_rev *req_rev,
				      const char *filename)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devl_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
					      DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					      &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);

	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
	if (err) {
		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
		return err;
	}

	err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update entry point: delegate to the common flash path. */
static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
}

/* Validator for the 'fw_load_policy' devlink param: only 'driver' and
 * 'flash' policies are supported.
 */
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
							    union devlink_param_value val,
							    struct netlink_ext_ack *extack)
{
	if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
	    val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};

/* Register the firmware-related devlink params and set the default
 * fw_load_policy to 'driver'.
 */
static int mlxsw_core_fw_params_register(struct mlxsw_core
					 *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devl_params_register(devlink, mlxsw_core_fw_devlink_params,
				   ARRAY_SIZE(mlxsw_core_fw_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					value);
	return 0;
}

/* Undo mlxsw_core_fw_params_register(). */
static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devl_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
			       ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}

/* Map a devlink_port back to its embedding mlxsw_core_port. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

/* devlink op: split a port into 'count' sub-ports; forwarded to the device
 * driver if it implements port_split.
 */
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    struct devlink_port *port,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core,
					      mlxsw_core_port->local_port,
					      count, extack);
}

/* devlink op: undo a previous port split; forwarded to the device driver. */
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      struct devlink_port *port,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core,
						mlxsw_core_port->local_port,
						extack);
}

/* The following mlxsw_devlink_sb_* callbacks are thin dispatchers: each
 * checks that the device driver implements the corresponding shared-buffer
 * operation (and, for per-port ops, that the port is initialized) and
 * forwards the call, returning -EOPNOTSUPP otherwise.
 */

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type,
					 extack);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold, extack);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold, extack);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

/* devlink op: report hw revision, PSID and running FW version, all read
 * from the MGIR register.
 */
static int
mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
	u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
	char mgir_pl[MLXSW_REG_MGIR_LEN];
	char buf[32];
	int err;

	mlxsw_reg_mgir_pack(mgir_pl);
	err =
mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl); 1551 if (err) 1552 return err; 1553 mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major, 1554 &fw_minor, &fw_sub_minor); 1555 1556 sprintf(buf, "%X", hw_rev); 1557 err = devlink_info_version_fixed_put(req, "hw.revision", buf); 1558 if (err) 1559 return err; 1560 1561 err = devlink_info_version_fixed_put(req, 1562 DEVLINK_INFO_VERSION_GENERIC_FW_PSID, 1563 fw_info_psid); 1564 if (err) 1565 return err; 1566 1567 sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor); 1568 err = devlink_info_version_running_put(req, "fw.version", buf); 1569 if (err) 1570 return err; 1571 1572 return devlink_info_version_running_put(req, 1573 DEVLINK_INFO_VERSION_GENERIC_FW, 1574 buf); 1575 } 1576 1577 static int 1578 mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink, 1579 bool netns_change, enum devlink_reload_action action, 1580 enum devlink_reload_limit limit, 1581 struct netlink_ext_ack *extack) 1582 { 1583 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1584 1585 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET)) 1586 return -EOPNOTSUPP; 1587 1588 mlxsw_core_bus_device_unregister(mlxsw_core, true); 1589 return 0; 1590 } 1591 1592 static int 1593 mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action, 1594 enum devlink_reload_limit limit, u32 *actions_performed, 1595 struct netlink_ext_ack *extack) 1596 { 1597 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1598 int err; 1599 1600 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | 1601 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); 1602 err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, 1603 mlxsw_core->bus, 1604 mlxsw_core->bus_priv, true, 1605 devlink, extack); 1606 return err; 1607 } 1608 1609 static int mlxsw_devlink_flash_update(struct devlink *devlink, 1610 struct devlink_flash_update_params *params, 1611 struct netlink_ext_ack *extack) 1612 { 1613 struct mlxsw_core 
*mlxsw_core = devlink_priv(devlink); 1614 1615 return mlxsw_core_fw_flash_update(mlxsw_core, params, extack); 1616 } 1617 1618 static int mlxsw_devlink_trap_init(struct devlink *devlink, 1619 const struct devlink_trap *trap, 1620 void *trap_ctx) 1621 { 1622 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1623 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1624 1625 if (!mlxsw_driver->trap_init) 1626 return -EOPNOTSUPP; 1627 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx); 1628 } 1629 1630 static void mlxsw_devlink_trap_fini(struct devlink *devlink, 1631 const struct devlink_trap *trap, 1632 void *trap_ctx) 1633 { 1634 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1635 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1636 1637 if (!mlxsw_driver->trap_fini) 1638 return; 1639 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx); 1640 } 1641 1642 static int mlxsw_devlink_trap_action_set(struct devlink *devlink, 1643 const struct devlink_trap *trap, 1644 enum devlink_trap_action action, 1645 struct netlink_ext_ack *extack) 1646 { 1647 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1648 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1649 1650 if (!mlxsw_driver->trap_action_set) 1651 return -EOPNOTSUPP; 1652 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack); 1653 } 1654 1655 static int 1656 mlxsw_devlink_trap_group_init(struct devlink *devlink, 1657 const struct devlink_trap_group *group) 1658 { 1659 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1660 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1661 1662 if (!mlxsw_driver->trap_group_init) 1663 return -EOPNOTSUPP; 1664 return mlxsw_driver->trap_group_init(mlxsw_core, group); 1665 } 1666 1667 static int 1668 mlxsw_devlink_trap_group_set(struct devlink *devlink, 1669 const struct devlink_trap_group *group, 1670 const struct devlink_trap_policer *policer, 1671 struct netlink_ext_ack *extack) 1672 { 1673 struct mlxsw_core 
*mlxsw_core = devlink_priv(devlink); 1674 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1675 1676 if (!mlxsw_driver->trap_group_set) 1677 return -EOPNOTSUPP; 1678 return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack); 1679 } 1680 1681 static int 1682 mlxsw_devlink_trap_policer_init(struct devlink *devlink, 1683 const struct devlink_trap_policer *policer) 1684 { 1685 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1686 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1687 1688 if (!mlxsw_driver->trap_policer_init) 1689 return -EOPNOTSUPP; 1690 return mlxsw_driver->trap_policer_init(mlxsw_core, policer); 1691 } 1692 1693 static void 1694 mlxsw_devlink_trap_policer_fini(struct devlink *devlink, 1695 const struct devlink_trap_policer *policer) 1696 { 1697 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1698 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1699 1700 if (!mlxsw_driver->trap_policer_fini) 1701 return; 1702 mlxsw_driver->trap_policer_fini(mlxsw_core, policer); 1703 } 1704 1705 static int 1706 mlxsw_devlink_trap_policer_set(struct devlink *devlink, 1707 const struct devlink_trap_policer *policer, 1708 u64 rate, u64 burst, 1709 struct netlink_ext_ack *extack) 1710 { 1711 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1712 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1713 1714 if (!mlxsw_driver->trap_policer_set) 1715 return -EOPNOTSUPP; 1716 return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst, 1717 extack); 1718 } 1719 1720 static int 1721 mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink, 1722 const struct devlink_trap_policer *policer, 1723 u64 *p_drops) 1724 { 1725 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 1726 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 1727 1728 if (!mlxsw_driver->trap_policer_counter_get) 1729 return -EOPNOTSUPP; 1730 return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer, 1731 p_drops); 1732 } 

/* devlink operations for the whole device: reload, shared buffers,
 * device info, FW flashing and packet traps.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions		= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down		= mlxsw_devlink_core_bus_device_reload_down,
	.reload_up		= mlxsw_devlink_core_bus_device_reload_up,
	.sb_pool_get		= mlxsw_devlink_sb_pool_get,
	.sb_pool_set		= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get	= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set	= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get	= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set	= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot	= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear	= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get	= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get		= mlxsw_devlink_info_get,
	.flash_update		= mlxsw_devlink_flash_update,
	.trap_init		= mlxsw_devlink_trap_init,
	.trap_fini		= mlxsw_devlink_trap_fini,
	.trap_action_set	= mlxsw_devlink_trap_action_set,
	.trap_group_init	= mlxsw_devlink_trap_group_init,
	.trap_group_set		= mlxsw_devlink_trap_group_set,
	.trap_policer_init	= mlxsw_devlink_trap_policer_init,
	.trap_policer_fini	= mlxsw_devlink_trap_policer_fini,
	.trap_policer_set	= mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};

/* Register all core devlink params (currently only the FW ones). */
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core_fw_params_register(mlxsw_core);
}

static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core_fw_params_unregister(mlxsw_core);
}

/* Deferred-work context for an MFDE (fatal event) report: carries a copy
 * of the MFDE payload from trap context to process context.
 */
struct mlxsw_core_health_event {
	struct mlxsw_core *mlxsw_core;
	char mfde_pl[MLXSW_REG_MFDE_LEN];
	struct work_struct work;
};

/* Workqueue handler: report the fatal event to the devlink health
 * reporter and free the event allocated in the listener.
 */
static void mlxsw_core_health_event_work(struct work_struct *work)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core;

	event = container_of(work, struct mlxsw_core_health_event, work);
	mlxsw_core = event->mlxsw_core;
	devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
			      event->mfde_pl);
	kfree(event);
}

/* MFDE trap listener: runs in atomic context, so copy the payload and
 * defer the actual health report to a workqueue. Allocation failure means
 * the event is silently dropped (nothing else we can do here).
 */
static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
					    char *mfde_pl, void *priv)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core = priv;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->mlxsw_core = mlxsw_core;
	memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
	INIT_WORK(&event->work, mlxsw_core_health_event_work);
	mlxsw_core_schedule_work(&event->work);
}

static const struct mlxsw_listener mlxsw_core_health_listener =
	MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);

/* Dump the MFDE fields specific to a 'fatal cause' event. */
static void
mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
					    struct devlink_fmsg *fmsg)
{
	u32 val, tile_v;

	val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
	tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
	if (tile_v) {
		val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
		devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
	}
}

/* Dump the MFDE fields specific to a 'FW assert' event. */
static void
mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
					  struct devlink_fmsg *fmsg)
{
	u32 val, tile_v;

	val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "var0", val);
	val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "var1", val);
	val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "var2", val);
	val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "var3", val);
	val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "var4", val);
	val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
	val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "callra", val);
	val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
	devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
	tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
	if (tile_v) {
		val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
		devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
	}
	val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
}

/* Dump the MFDE fields specific to a 'KVD insertion machine stop' event. */
static void
mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
					    struct devlink_fmsg *fmsg)
{
	u32 val;

	val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
	devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
	val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
}

/* Dump the MFDE fields specific to a 'CR space timeout' event. */
static void
mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
					   struct devlink_fmsg *fmsg)
{
	u32 val;

	val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
	devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
	val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
	devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
	val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
	devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
	val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
	devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
}

/* devlink health 'dump' callback: decode a queued MFDE payload into a
 * structured fmsg. Only works with an event context (priv_ctx).
 */
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
					   struct devlink_fmsg *fmsg, void *priv_ctx,
					   struct netlink_ext_ack *extack)
1885 { 1886 char *mfde_pl = priv_ctx; 1887 char *val_str; 1888 u8 event_id; 1889 u32 val; 1890 1891 if (!priv_ctx) 1892 /* User-triggered dumps are not possible */ 1893 return -EOPNOTSUPP; 1894 1895 val = mlxsw_reg_mfde_irisc_id_get(mfde_pl); 1896 devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val); 1897 1898 devlink_fmsg_arr_pair_nest_start(fmsg, "event"); 1899 event_id = mlxsw_reg_mfde_event_id_get(mfde_pl); 1900 devlink_fmsg_u32_pair_put(fmsg, "id", event_id); 1901 switch (event_id) { 1902 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: 1903 val_str = "CR space timeout"; 1904 break; 1905 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: 1906 val_str = "KVD insertion machine stopped"; 1907 break; 1908 case MLXSW_REG_MFDE_EVENT_ID_TEST: 1909 val_str = "Test"; 1910 break; 1911 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: 1912 val_str = "FW assert"; 1913 break; 1914 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: 1915 val_str = "Fatal cause"; 1916 break; 1917 default: 1918 val_str = NULL; 1919 } 1920 if (val_str) 1921 devlink_fmsg_string_pair_put(fmsg, "desc", val_str); 1922 devlink_fmsg_arr_pair_nest_end(fmsg); 1923 1924 devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); 1925 val = mlxsw_reg_mfde_severity_get(mfde_pl); 1926 devlink_fmsg_u8_pair_put(fmsg, "id", val); 1927 switch (val) { 1928 case MLXSW_REG_MFDE_SEVERITY_FATL: 1929 val_str = "Fatal"; 1930 break; 1931 case MLXSW_REG_MFDE_SEVERITY_NRML: 1932 val_str = "Normal"; 1933 break; 1934 case MLXSW_REG_MFDE_SEVERITY_INTR: 1935 val_str = "Debug"; 1936 break; 1937 default: 1938 val_str = NULL; 1939 } 1940 if (val_str) 1941 devlink_fmsg_string_pair_put(fmsg, "desc", val_str); 1942 devlink_fmsg_arr_pair_nest_end(fmsg); 1943 1944 val = mlxsw_reg_mfde_method_get(mfde_pl); 1945 switch (val) { 1946 case MLXSW_REG_MFDE_METHOD_QUERY: 1947 val_str = "query"; 1948 break; 1949 case MLXSW_REG_MFDE_METHOD_WRITE: 1950 val_str = "write"; 1951 break; 1952 default: 1953 val_str = NULL; 1954 } 1955 if (val_str) 1956 devlink_fmsg_string_pair_put(fmsg, "method", 
val_str); 1957 1958 val = mlxsw_reg_mfde_long_process_get(mfde_pl); 1959 devlink_fmsg_bool_pair_put(fmsg, "long_process", val); 1960 1961 val = mlxsw_reg_mfde_command_type_get(mfde_pl); 1962 switch (val) { 1963 case MLXSW_REG_MFDE_COMMAND_TYPE_MAD: 1964 val_str = "mad"; 1965 break; 1966 case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD: 1967 val_str = "emad"; 1968 break; 1969 case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF: 1970 val_str = "cmdif"; 1971 break; 1972 default: 1973 val_str = NULL; 1974 } 1975 if (val_str) 1976 devlink_fmsg_string_pair_put(fmsg, "command_type", val_str); 1977 1978 val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl); 1979 devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val); 1980 1981 switch (event_id) { 1982 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: 1983 mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, fmsg); 1984 break; 1985 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: 1986 mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, fmsg); 1987 break; 1988 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: 1989 mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); 1990 break; 1991 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: 1992 mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, fmsg); 1993 break; 1994 } 1995 1996 return 0; 1997 } 1998 1999 static int 2000 mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter, 2001 struct netlink_ext_ack *extack) 2002 { 2003 struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter); 2004 char mfgd_pl[MLXSW_REG_MFGD_LEN]; 2005 int err; 2006 2007 /* Read the register first to make sure no other bits are changed. 
*/ 2008 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2009 if (err) 2010 return err; 2011 mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true); 2012 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2013 } 2014 2015 static const struct devlink_health_reporter_ops 2016 mlxsw_core_health_fw_fatal_ops = { 2017 .name = "fw_fatal", 2018 .dump = mlxsw_core_health_fw_fatal_dump, 2019 .test = mlxsw_core_health_fw_fatal_test, 2020 }; 2021 2022 static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core, 2023 bool enable) 2024 { 2025 char mfgd_pl[MLXSW_REG_MFGD_LEN]; 2026 int err; 2027 2028 /* Read the register first to make sure no other bits are changed. */ 2029 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2030 if (err) 2031 return err; 2032 mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable); 2033 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl); 2034 } 2035 2036 static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core) 2037 { 2038 struct devlink *devlink = priv_to_devlink(mlxsw_core); 2039 struct devlink_health_reporter *fw_fatal; 2040 int err; 2041 2042 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 2043 return 0; 2044 2045 fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops, 2046 0, mlxsw_core); 2047 if (IS_ERR(fw_fatal)) { 2048 dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter"); 2049 return PTR_ERR(fw_fatal); 2050 } 2051 mlxsw_core->health.fw_fatal = fw_fatal; 2052 2053 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); 2054 if (err) 2055 goto err_trap_register; 2056 2057 err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true); 2058 if (err) 2059 goto err_fw_fatal_config; 2060 2061 return 0; 2062 2063 err_fw_fatal_config: 2064 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); 2065 err_trap_register: 2066 devl_health_reporter_destroy(mlxsw_core->health.fw_fatal); 2067 return 
/* NOTE(review): "err;" completes the "return" statement started on the
 * previous chunk line (tail of mlxsw_core_health_init()).
 */
	       err;
}

/* Teardown counterpart of mlxsw_core_health_init(). */
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
	/* Make sure there is no more event work scheduled */
	mlxsw_core_flush_owq();
	devl_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}

static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
{
	INIT_LIST_HEAD(&mlxsw_core->irq_event_handler_list);
	mutex_init(&mlxsw_core->irq_event_handler_lock);
}

static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core)
{
	mutex_destroy(&mlxsw_core->irq_event_handler_lock);
	WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list));
}

/* Bring up one mlxsw device: allocate/lock devlink (unless reloading),
 * initialize the bus, resources, ports, EMAD, firmware, line cards,
 * health, hwmon, thermal and environment subsystems, then call the
 * bus-specific driver init. Unwinds in reverse order on failure.
 * NOTE(review): this function is split across several chunk lines; its
 * tail (including the error-unwind labels) continues past this block.
 */
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	u16 max_lag;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
					mlxsw_bus_info->dev);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
		devl_lock(devlink);
		devl_register(devlink);
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;
	mlxsw_core_irq_event_handler_init(mlxsw_core);

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core, reload);
	if (err)
		goto err_ports_init;

	/* The LAG mapping table is only allocated when the device exposes
	 * the MAX_LAG_MEMBERS resource.
	 */
	err = mlxsw_core_max_lag(mlxsw_core, &max_lag);
	if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_core_trap_groups_set(mlxsw_core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = mlxsw_core_params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
					 mlxsw_driver->fw_filename);
	if (err)
		goto err_fw_rev_validate;

	err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
	if (err)
		goto err_linecards_init;

	err = mlxsw_core_health_init(mlxsw_core);
	if (err)
		goto err_health_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
	/* NOTE(review): this "if" condition continues on the next chunk
	 * line.
	 */
	if
/* NOTE(review): "(err)" completes the "if" started on the previous chunk
 * line (tail of __mlxsw_core_bus_device_register()).
 */
	   (err)
		goto err_env_init;

	/* Give the bus-specific driver a chance to do its own init last. */
	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
		if (err)
			goto err_driver_init;
	}

	if (!reload)
		devl_unlock(devlink);
	return 0;

err_driver_init:
	mlxsw_env_fini(mlxsw_core->env);
err_env_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	mlxsw_core_health_fini(mlxsw_core);
err_health_init:
	mlxsw_linecards_fini(mlxsw_core);
err_linecards_init:
err_fw_rev_validate:
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
err_trap_groups_set:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
	if (!reload)
		devl_resources_unregister(devlink);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	mlxsw_core_irq_event_handler_fini(mlxsw_core);
	if (!reload) {
		devl_unregister(devlink);
		devl_unlock(devlink);
		devlink_free(devlink);
	}
err_devlink_alloc:
	return err;
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink,
				   struct netlink_ext_ack *extack)
{
	bool called_again = false;
	int err;

again:
	err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
					       bus_priv, reload,
					       devlink, extack);
	/* -EAGAIN is returned in case the FW was updated. FW needs
	 * a reset, so lets try to call __mlxsw_core_bus_device_register()
	 * again.
	 */
	if (err == -EAGAIN && !called_again) {
		called_again = true;
		goto again;
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (!reload)
		devl_lock(devlink);

	if (devlink_is_reload_failed(devlink)) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	/* Teardown mirrors the init order of
	 * __mlxsw_core_bus_device_register(), in reverse.
	 */
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_env_fini(mlxsw_core->env);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	mlxsw_core_health_fini(mlxsw_core);
	mlxsw_linecards_fini(mlxsw_core);
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core, reload);
	if (!reload)
		devl_resources_unregister(devlink);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	mlxsw_core_irq_event_handler_fini(mlxsw_core);
	if (!reload) {
		devl_unregister(devlink);
		devl_unlock(devlink);
		devlink_free(devlink);
	}

	return;

reload_fail_deinit:
	mlxsw_core_params_unregister(mlxsw_core);
	devl_resources_unregister(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

/* NOTE(review): this signature continues on the next chunk line. */
int mlxsw_core_skb_transmit(struct mlxsw_core
/* NOTE(review): continuation of the mlxsw_core_skb_transmit() signature
 * started on the previous chunk line.
 */
					   *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_txhdr_info *txhdr_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     txhdr_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				struct sk_buff *skb, u16 local_port)
{
	if (mlxsw_core->driver->ptp_transmitted)
		mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
						    local_port);
}
EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);

/* Two RX listeners are considered the same entry iff all of their match
 * keys are equal.
 */
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id &&
		rxl_a->mirror_reason == rxl_b->mirror_reason);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv, bool enabled)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;
	rxl_item->enabled = enabled;

	/* RCU insertion: the RX path walks this list under
	 * rcu_read_lock().
	 */
	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	/* Wait for in-flight RX-path readers before freeing the item. */
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void
mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_rx_listener *rxl,
				 bool enabled)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (WARN_ON(!rxl_item))
		return;
	rxl_item->enabled = enabled;
}

/* RX handler that adapts a received EMAD event skb into an event
 * listener callback (register info + payload), then frees the skb.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_reg_info reg;
	char *payload;
	char *reg_tlv;
	char *op_tlv;

	mlxsw_core = event_listener_item->mlxsw_core;
	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);
	op_tlv = mlxsw_emad_op_tlv(skb);
	reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* TLV length is in 32-bit words and includes the TLV header word. */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(reg_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el)
{
	struct mlxsw_event_listener_item *el_item;

	/* NOTE(review): this statement continues on the next chunk line. */
	list_for_each_entry(el_item,
/* NOTE(review): continuation of __find_event_listener_item(); the
 * list_for_each_entry() arguments started on the previous chunk line.
 */
			    &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el))
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	/* Events are delivered via an RX listener matching the event's
	 * trap on any local port.
	 */
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->mlxsw_core = mlxsw_core;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

/* Dispatch to the event or RX register helper, depending on the listener
 * type.
 */
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv, bool enabled)
{
	if (listener->is_event) {
		/* Event listeners are always registered enabled. */
		WARN_ON(!enabled);
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->event_listener,
						priv);
	} else {
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->rx_listener,
						priv, enabled);
	}
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						     &listener->event_listener);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->rx_listener);
}

/* Register a listener and program the device's trap policy (HPKT)
 * according to the listener's enabled-on-register state.
 */
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
					   listener->enabled_on_register);
	if (err)
		return err;

	action = listener->enabled_on_register ? listener->en_action :
						 listener->dis_action;
	trap_group = listener->enabled_on_register ? listener->en_trap_group :
						     listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	/* Program the disabled action before tearing the listener down;
	 * events have no trap policy to restore.
	 */
	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
				    listener->trap_id, listener->dis_trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_listener *listeners,
			      size_t listeners_count, void *priv)
{
	int i, err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_core,
					       &listeners[i],
					       priv);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	/* Unwind only the listeners registered so far. */
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_core,
					   &listeners[i],
					   priv);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_core_traps_register);

/* NOTE(review): this declaration continues on the next chunk line. */
void
/* NOTE(review): continuation of the declaration whose "void" return type
 * is on the previous chunk line.
 */
mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
			    const struct mlxsw_listener *listeners,
			    size_t listeners_count, void *priv)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_core,
					   &listeners[i],
					   priv);
	}
}
EXPORT_SYMBOL(mlxsw_core_traps_unregister);

/* Enable/disable a registered trap: reprogram HPKT and update the RX
 * listener state accordingly.
 */
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_listener *listener,
			      bool enabled)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	/* Not supported for event listener */
	if (WARN_ON(listener->is_event))
		return -EINVAL;

	action = enabled ? listener->en_action : listener->dis_action;
	trap_group = enabled ? listener->en_trap_group :
			       listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		return err;

	mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
					 enabled);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_trap_state_set);

/* Allocate the next EMAD transaction id. */
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

/* Issue an asynchronous EMAD register transaction; the caller collects
 * the result via the bulk list (mlxsw_reg_trans_bulk_wait()).
 */
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		/* The transaction may still be referenced from the RX
		 * path, so free it after an RCU grace period.
		 */
		kfree_rcu(trans, rcu);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256

/* Wait for one EMAD transaction to complete, report failures, then
 * release the transaction. Returns the transaction's error code.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

		snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
			 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
			 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
			 mlxsw_emad_op_tlv_status_str(trans->emad_status),
			 trans->emad_err_string ? trans->emad_err_string : "");

		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
				    trans->emad_status, err_string);

		kfree(trans->emad_err_string);
	}

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);

struct mlxsw_core_irq_event_handler_item {
	struct list_head list;
	void (*cb)(struct mlxsw_core *mlxsw_core);
};

int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
					  mlxsw_irq_event_cb_t cb)
{
	struct mlxsw_core_irq_event_handler_item *item;

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->cb = cb;
	mutex_lock(&mlxsw_core->irq_event_handler_lock);
	list_add_tail(&item->list, &mlxsw_core->irq_event_handler_list);
	mutex_unlock(&mlxsw_core->irq_event_handler_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register);

void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
					     mlxsw_irq_event_cb_t cb)
{
	struct mlxsw_core_irq_event_handler_item *item, *tmp;

	mutex_lock(&mlxsw_core->irq_event_handler_lock);
	list_for_each_entry_safe(item, tmp,
				 &mlxsw_core->irq_event_handler_list, list) {
		if (item->cb == cb) {
			list_del(&item->list);
			kfree(item);
		}
	}
	mutex_unlock(&mlxsw_core->irq_event_handler_lock);
}
EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister);

/* NOTE(review): this declaration continues on the next chunk line. */
void
/* NOTE(review): continuation of the declaration whose "void" return type
 * is on the previous chunk line.
 */
mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_core_irq_event_handler_item *item;

	mutex_lock(&mlxsw_core->irq_event_handler_lock);
	list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) {
		if (item->cb)
			item->cb(mlxsw_core);
	}
	mutex_unlock(&mlxsw_core->irq_event_handler_lock);
}
EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call);

/* Access a register through the command interface (mailboxes), used when
 * the EMAD path is not available. EMAD-formatted TLVs are packed into
 * the input mailbox and the payload is copied back on success.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

/* Transaction callback for synchronous accesses: copy the response
 * payload back into the caller's buffer (passed via cb_priv).
 */
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

/* Synchronous register access: EMAD when available, otherwise the
 * command interface.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

/* RX entry point: resolve the local port (possibly via the LAG mapping),
 * then deliver the skb to the first matching enabled RX listener.
 * NOTE(review): this function continues on the next chunk line.
 */
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u16 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->trap_id);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
2956 */ 2957 local_port = mlxsw_core_lag_mapping_get(mlxsw_core, 2958 rx_info->u.lag_id, 2959 rx_info->lag_port_index); 2960 } else { 2961 local_port = rx_info->u.sys_port; 2962 } 2963 2964 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n", 2965 __func__, local_port, rx_info->trap_id); 2966 2967 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || 2968 (local_port >= mlxsw_core->max_ports)) 2969 goto drop; 2970 2971 rcu_read_lock(); 2972 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) { 2973 rxl = &rxl_item->rxl; 2974 if ((rxl->local_port == MLXSW_PORT_DONT_CARE || 2975 rxl->local_port == local_port) && 2976 rxl->trap_id == rx_info->trap_id && 2977 rxl->mirror_reason == rx_info->mirror_reason) { 2978 if (rxl_item->enabled) 2979 found = true; 2980 break; 2981 } 2982 } 2983 if (!found) { 2984 rcu_read_unlock(); 2985 goto drop; 2986 } 2987 2988 rxl->func(skb, local_port, rxl_item->priv); 2989 rcu_read_unlock(); 2990 return; 2991 2992 drop: 2993 dev_kfree_skb(skb); 2994 } 2995 EXPORT_SYMBOL(mlxsw_core_skb_receive); 2996 2997 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, 2998 u16 lag_id, u8 port_index) 2999 { 3000 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id + 3001 port_index; 3002 } 3003 3004 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, 3005 u16 lag_id, u8 port_index, u16 local_port) 3006 { 3007 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 3008 lag_id, port_index); 3009 3010 mlxsw_core->lag.mapping[index] = local_port; 3011 } 3012 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); 3013 3014 u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, 3015 u16 lag_id, u8 port_index) 3016 { 3017 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 3018 lag_id, port_index); 3019 3020 return mlxsw_core->lag.mapping[index]; 3021 } 3022 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); 3023 3024 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, 3025 
u16 lag_id, u16 local_port) 3026 { 3027 int i; 3028 3029 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) { 3030 int index = mlxsw_core_lag_mapping_index(mlxsw_core, 3031 lag_id, i); 3032 3033 if (mlxsw_core->lag.mapping[index] == local_port) 3034 mlxsw_core->lag.mapping[index] = 0; 3035 } 3036 } 3037 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); 3038 3039 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, 3040 enum mlxsw_res_id res_id) 3041 { 3042 return mlxsw_res_valid(&mlxsw_core->res, res_id); 3043 } 3044 EXPORT_SYMBOL(mlxsw_core_res_valid); 3045 3046 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, 3047 enum mlxsw_res_id res_id) 3048 { 3049 return mlxsw_res_get(&mlxsw_core->res, res_id); 3050 } 3051 EXPORT_SYMBOL(mlxsw_core_res_get); 3052 3053 static const struct devlink_port_ops mlxsw_devlink_port_ops = { 3054 .port_split = mlxsw_devlink_port_split, 3055 .port_unsplit = mlxsw_devlink_port_unsplit, 3056 }; 3057 3058 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, 3059 enum devlink_port_flavour flavour, 3060 u8 slot_index, u32 port_number, bool split, 3061 u32 split_port_subnumber, 3062 bool splittable, u32 lanes, 3063 const unsigned char *switch_id, 3064 unsigned char switch_id_len) 3065 { 3066 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3067 struct mlxsw_core_port *mlxsw_core_port = 3068 &mlxsw_core->ports[local_port]; 3069 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; 3070 struct devlink_port_attrs attrs = {}; 3071 int err; 3072 3073 attrs.split = split; 3074 attrs.lanes = lanes; 3075 attrs.splittable = splittable; 3076 attrs.flavour = flavour; 3077 attrs.phys.port_number = port_number; 3078 attrs.phys.split_subport_number = split_port_subnumber; 3079 memcpy(attrs.switch_id.id, switch_id, switch_id_len); 3080 attrs.switch_id.id_len = switch_id_len; 3081 mlxsw_core_port->local_port = local_port; 3082 devlink_port_attrs_set(devlink_port, &attrs); 3083 if 
/* NOTE(review): continuation of __mlxsw_core_port_init(); the "if"
 * keyword of this condition is on the previous chunk line.
 */
	   (slot_index) {
		struct mlxsw_linecard *linecard;

		linecard = mlxsw_linecard_get(mlxsw_core->linecards,
					      slot_index);
		mlxsw_core_port->linecard = linecard;
		devlink_port_linecard_set(devlink_port,
					  linecard->devlink_linecard);
	}
	err = devl_port_register_with_ops(devlink, devlink_port, local_port,
					  &mlxsw_devlink_port_ops);
	if (err)
		/* Leave the slot pristine so a later init starts clean. */
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}

static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devl_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
			 u8 slot_index, u32 port_number, bool split,
			 u32 split_port_subnumber,
			 bool splittable, u32 lanes,
			 const unsigned char *switch_id,
			 unsigned char switch_id_len)
{
	int err;

	err = __mlxsw_core_port_init(mlxsw_core, local_port,
				     DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
				     port_number, split, split_port_subnumber,
				     splittable, lanes,
				     switch_id, switch_id_len);
	if (err)
		return err;

	atomic_inc(&mlxsw_core->active_ports_count);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
	atomic_dec(&mlxsw_core->active_ports_count);

	__mlxsw_core_port_fini(mlxsw_core, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
			     void *port_driver_priv,
			     const unsigned char *switch_id,
			     unsigned char switch_id_len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
	int err;

	err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
				     DEVLINK_PORT_FLAVOUR_CPU,
				     0, 0, false, 0, false, 0,
				     switch_id, switch_id_len);
	if (err)
		return err;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_init);

void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
{
	__mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);

void mlxsw_core_port_netdev_link(struct mlxsw_core *mlxsw_core, u16 local_port,
				 void *port_driver_priv, struct net_device *dev)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	SET_NETDEV_DEVLINK_PORT(dev, devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_netdev_link);

struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
				 u16 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port;
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);

struct mlxsw_linecard *
mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
			     u16 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];

	return mlxsw_core_port->linecard;
}

void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
				      bool (*selector)(void *priv, u16 local_port),
				      void *priv)
{
	if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
		return;
	mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
}

struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->env;
}

/* Dump a buffer as 32-bit big-endian words, trimming trailing all-zero
 * words (at least one word is always printed).
 */
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

/* Execute a command on the underlying bus, dumping mailboxes at debug
 * level. reset_ok tells the caller to tolerate the -EIO returned while
 * a reset command is running.
 * NOTE(review): this function is truncated at the end of this chunk;
 * its remainder lies beyond the visible source.
 */
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed 
(opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", 3266 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, 3267 in_mod, status, mlxsw_cmd_status_str(status)); 3268 } else if (err == -ETIMEDOUT) { 3269 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", 3270 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, 3271 in_mod); 3272 } 3273 3274 return err; 3275 } 3276 EXPORT_SYMBOL(mlxsw_cmd_exec); 3277 3278 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) 3279 { 3280 return queue_delayed_work(mlxsw_wq, dwork, delay); 3281 } 3282 EXPORT_SYMBOL(mlxsw_core_schedule_dw); 3283 3284 bool mlxsw_core_schedule_work(struct work_struct *work) 3285 { 3286 return queue_work(mlxsw_owq, work); 3287 } 3288 EXPORT_SYMBOL(mlxsw_core_schedule_work); 3289 3290 void mlxsw_core_flush_owq(void) 3291 { 3292 flush_workqueue(mlxsw_owq); 3293 } 3294 EXPORT_SYMBOL(mlxsw_core_flush_owq); 3295 3296 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3297 const struct mlxsw_config_profile *profile, 3298 u64 *p_single_size, u64 *p_double_size, 3299 u64 *p_linear_size) 3300 { 3301 struct mlxsw_driver *driver = mlxsw_core->driver; 3302 3303 if (!driver->kvd_sizes_get) 3304 return -EINVAL; 3305 3306 return driver->kvd_sizes_get(mlxsw_core, profile, 3307 p_single_size, p_double_size, 3308 p_linear_size); 3309 } 3310 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); 3311 3312 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, 3313 struct mlxsw_res *res) 3314 { 3315 int index, i; 3316 u64 data; 3317 u16 id; 3318 int err; 3319 3320 mlxsw_cmd_mbox_zero(mbox); 3321 3322 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; 3323 index++) { 3324 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index); 3325 if (err) 3326 return err; 3327 3328 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { 3329 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); 3330 data = 
mlxsw_cmd_mbox_query_resource_data_get(mbox, i); 3331 3332 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) 3333 return 0; 3334 3335 mlxsw_res_parse(res, id, data); 3336 } 3337 } 3338 3339 /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get 3340 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. 3341 */ 3342 return -EIO; 3343 } 3344 EXPORT_SYMBOL(mlxsw_core_resources_query); 3345 3346 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core) 3347 { 3348 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv); 3349 } 3350 EXPORT_SYMBOL(mlxsw_core_read_frc_h); 3351 3352 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core) 3353 { 3354 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv); 3355 } 3356 EXPORT_SYMBOL(mlxsw_core_read_frc_l); 3357 3358 u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core) 3359 { 3360 return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv); 3361 } 3362 EXPORT_SYMBOL(mlxsw_core_read_utc_sec); 3363 3364 u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core) 3365 { 3366 return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv); 3367 } 3368 EXPORT_SYMBOL(mlxsw_core_read_utc_nsec); 3369 3370 bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core) 3371 { 3372 return mlxsw_core->driver->sdq_supports_cqe_v2; 3373 } 3374 EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2); 3375 3376 static int __init mlxsw_core_module_init(void) 3377 { 3378 int err; 3379 3380 err = mlxsw_linecard_driver_register(); 3381 if (err) 3382 return err; 3383 3384 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); 3385 if (!mlxsw_wq) { 3386 err = -ENOMEM; 3387 goto err_alloc_workqueue; 3388 } 3389 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, 3390 mlxsw_core_driver_name); 3391 if (!mlxsw_owq) { 3392 err = -ENOMEM; 3393 goto err_alloc_ordered_workqueue; 3394 } 3395 return 0; 3396 3397 err_alloc_ordered_workqueue: 3398 destroy_workqueue(mlxsw_wq); 3399 err_alloc_workqueue: 3400 
mlxsw_linecard_driver_unregister(); 3401 return err; 3402 } 3403 3404 static void __exit mlxsw_core_module_exit(void) 3405 { 3406 destroy_workqueue(mlxsw_owq); 3407 destroy_workqueue(mlxsw_wq); 3408 mlxsw_linecard_driver_unregister(); 3409 } 3410 3411 module_init(mlxsw_core_module_init); 3412 module_exit(mlxsw_core_module_exit); 3413 3414 MODULE_LICENSE("Dual BSD/GPL"); 3415 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 3416 MODULE_DESCRIPTION("Mellanox switch device core driver"); 3417