/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
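/* Editor's note: driver_priv is a zero-length array, so the driver-specific
 * private area is allocated inline, immediately after struct mlxsw_core
 * itself (see the priv_size-based devlink allocation in
 * mlxsw_core_bus_device_register() below), which is why it must remain the
 * last member.
 */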
#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
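/* Editor's sketch of the resulting EMAD frame layout, as assembled by
 * mlxsw_emad_construct() below from the items above (sizes in bytes,
 * derived from the item offsets; not an authoritative hardware spec):
 *
 *	+------------------+
 *	| Ethernet header  | 16: DMAC, SMAC, ethertype 0x8932, proto, ver
 *	+------------------+
 *	| Operation TLV    | 16: type, len, status, register ID, method,
 *	|                  |     class, 64-bit transaction ID
 *	+------------------+
 *	| Register TLV     | 4 + register payload
 *	+------------------+
 *	| End TLV          | 4
 *	+------------------+
 */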
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}
static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS	200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
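/* Editor's summary of the transaction life cycle implemented above and
 * below: mlxsw_emad_transmit() sends a copy of the request, sets
 * trans->active and arms the timeout work. The response handler and the
 * timeout work then race on atomic_dec_and_test(&trans->active); whichever
 * wins owns the transaction. Busy/ack statuses and timeouts are retried up
 * to MLXSW_EMAD_MAX_RETRY times before the transaction is finished with an
 * error.
 */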
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
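/* Transaction IDs pair requests with responses: the upper 32 bits are
 * randomized once in mlxsw_emad_init() below and identify this host, while
 * the lower 32 bits are a counter bumped by mlxsw_core_tid_get() for every
 * transaction. For example (hypothetical values), a host whose random
 * prefix is 0x5eed0001 would issue its 42nd transaction with tid
 * 0x5eed00010000002a.
 */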
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
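/* Note that mlxsw_emad_alloc() reserves the entire frame length, so the
 * EMAD is then filled back-to-front: mlxsw_emad_construct() pushes the end
 * TLV first and the Ethernet header last, and the bus-specific TX header
 * is prepended after that via the driver's txhdr_construct().
 */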
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					   unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
static const struct devlink_ops mlxsw_devlink_ops = {
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
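/* All of the devlink ops above follow the same pattern: validate the input
 * where needed, look up the corresponding optional mlxsw_driver callback,
 * and return -EOPNOTSUPP when the driver does not implement it. Per-port
 * ops additionally require the core port to be in use, as checked by
 * mlxsw_core_port_check().
 */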
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
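/* The RX listener list is walked under RCU by mlxsw_core_skb_receive(), so
 * additions above use list_add_rcu() and removal below must let a grace
 * period elapse via synchronize_rcu() before the item may be freed.
 */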
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}
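/* Event listeners are layered on top of RX listeners: registering one
 * below installs an RX listener on the event's trap ID whose handler,
 * mlxsw_core_event_listener_func() above, decodes the EMAD and invokes the
 * event callback with the register payload.
 */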
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
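/* A typical (hypothetical) driver-side use of the bulk transaction API:
 * queue several transactions on a local list, then wait for all of them at
 * once. The callback, when given, runs with the response payload before
 * the transaction completes; the register and callback names used here are
 * only illustrative.
 *
 *	LIST_HEAD(bulk_list);
 *	int err;
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
 *				    &bulk_list, occ_query_cb,
 *				    (unsigned long) cb_ctx);
 *	if (err)
 *		return err;
 *	... queue more transactions on bulk_list ...
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */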
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
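/* mlxsw_reg_query() and mlxsw_reg_write() above are the synchronous front
 * ends: once emad.use_emad is set they go through EMAD with a single-entry
 * bulk list and a callback that copies the response back into the caller's
 * payload buffer; before that point they fall back to the firmware command
 * interface.
 */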
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}
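/* The LAG mapping is kept as a flat MAX_LAG * MAX_LAG_MEMBERS array of
 * local ports, indexed as lag_id * MAX_LAG_MEMBERS + port_index by the
 * helper above. With, say, 64 LAG members (a hypothetical value),
 * (lag_id = 2, port_index = 3) lands at entry 131.
 */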
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     bool split, u32 split_group)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
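/* mlxsw_core_buf_dump_dbg() below trims trailing all-zero words and then
 * dumps the mailbox contents four big-endian 32-bit words per line,
 * e.g. (made-up data):
 *
 *	0000 - 10000000 00000001 00000000 000000a5
 */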
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");