/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
        u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
        u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
        u64                     port_rx_packets[MLXSW_PORT_MAX_PORTS];
        u64                     port_rx_bytes[MLXSW_PORT_MAX_PORTS];
        struct u64_stats_sync   syncp;
        u32                     trap_rx_dropped[MLXSW_TRAP_ID_MAX];
        u32                     port_rx_dropped[MLXSW_PORT_MAX_PORTS];
        u32                     trap_rx_invalid;
        u32                     port_rx_invalid;
};

struct mlxsw_core {
        struct mlxsw_driver *driver;
        const struct mlxsw_bus *bus;
        void *bus_priv;
        const struct mlxsw_bus_info *bus_info;
        struct list_head rx_listener_list;
        struct list_head event_listener_list;
        struct {
                struct sk_buff *resp_skb;
                u64 tid;
                wait_queue_head_t wait;
                bool trans_active;
                struct mutex lock; /* One EMAD transaction at a time. */
                bool use_emad;
        } emad;
        struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
        struct dentry *dbg_dir;
        struct {
                struct debugfs_blob_wrapper vsd_blob;
                struct debugfs_blob_wrapper psid_blob;
        } dbg;
        unsigned long driver_priv[0];
        /* driver_priv has to be always the last item */
};

struct mlxsw_rx_listener_item {
        struct list_head list;
        struct mlxsw_rx_listener rxl;
        void *priv;
};

struct mlxsw_event_listener_item {
        struct list_head list;
        struct mlxsw_event_listener el;
        void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
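
/* An EMAD frame, as assembled back-to-front by mlxsw_emad_construct()
 * below, is laid out as follows (a sketch derived from the item
 * definitions above, not from the programmer's reference manual):
 *
 *      +--------------------+
 *      | Ethernet header    | DMAC, SMAC, ethertype 0x8932, proto, ver
 *      +--------------------+
 *      | Operation TLV      | 4 u32s: type/len/dr/status, register ID,
 *      |                    | r bit, method, class, 64-bit transaction ID
 *      +--------------------+
 *      | Register TLV       | 1 u32 header followed by register payload
 *      +--------------------+
 *      | End TLV            | 1 u32
 *      +--------------------+
 */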
enum mlxsw_core_reg_access_type {
        MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
        MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
        switch (type) {
        case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
                return "query";
        case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
                return "write";
        }
        BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
        mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
        mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
                                    const struct mlxsw_reg_info *reg,
                                    char *payload)
{
        mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
        mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
        memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
                                   const struct mlxsw_reg_info *reg,
                                   enum mlxsw_core_reg_access_type type,
                                   struct mlxsw_core *mlxsw_core)
{
        mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
        mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
        mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
        mlxsw_emad_op_tlv_status_set(op_tlv, 0);
        mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
        mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
        if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
                mlxsw_emad_op_tlv_method_set(op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_QUERY);
        else
                mlxsw_emad_op_tlv_method_set(op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_WRITE);
        mlxsw_emad_op_tlv_class_set(op_tlv,
                                    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
        mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
        char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

        mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
        mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
        mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
        mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
        mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

        skb_reset_mac_header(skb);

        return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type,
                                 struct mlxsw_core *mlxsw_core)
{
        char *buf;

        buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
        mlxsw_emad_pack_end_tlv(buf);

        buf = skb_push(skb, reg->len + sizeof(u32));
        mlxsw_emad_pack_reg_tlv(buf, reg, payload);

        buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
        mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

        mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
                          MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
        return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
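
/* Note on the accessors above: the operation TLV starts right after the
 * Ethernet header, the register TLV follows the four-u32 operation TLV,
 * and the register payload skips one more u32 - the register TLV's own
 * header - which is also why mlxsw_emad_pack_reg_tlv() copies the
 * payload to reg_tlv + sizeof(u32) and adds one to the TLV length.
 */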
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
        char *op_tlv;

        op_tlv = mlxsw_emad_op_tlv(skb);
        return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
        char *op_tlv;

        op_tlv = mlxsw_emad_op_tlv(skb);
        return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
}

#define MLXSW_EMAD_TIMEOUT_MS 200

static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
                                 struct sk_buff *skb,
                                 const struct mlxsw_tx_info *tx_info)
{
        int err;
        int ret;

        err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
        if (err) {
                dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
                        mlxsw_core->emad.tid);
                dev_kfree_skb(skb);
                return err;
        }

        mlxsw_core->emad.trans_active = true;
        ret = wait_event_timeout(mlxsw_core->emad.wait,
                                 !(mlxsw_core->emad.trans_active),
                                 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
        if (!ret) {
                dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
                         mlxsw_core->emad.tid);
                mlxsw_core->emad.trans_active = false;
                return -EIO;
        }

        return 0;
}
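
/* Request/response pairing (a summary of the code above and the RX
 * listener below): __mlxsw_emad_transmit() raises emad.trans_active and
 * sleeps on emad.wait; mlxsw_emad_rx_listener_func() matches a response
 * by transaction ID, stashes it in emad.resp_skb, clears trans_active
 * and wakes the waiter. The emad.lock mutex taken in
 * mlxsw_core_reg_access() guarantees a single outstanding transaction.
 */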
static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
                                     char *op_tlv)
{
        enum mlxsw_emad_op_tlv_status status;
        u64 tid;

        status = mlxsw_emad_op_tlv_status_get(op_tlv);
        tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

        switch (status) {
        case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
                return 0;
        case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
        case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
                dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
                         tid, status, mlxsw_emad_op_tlv_status_str(status));
                return -EAGAIN;
        case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
        case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
        case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
        case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
        default:
                dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
                        tid, status, mlxsw_emad_op_tlv_status_str(status));
                return -EIO;
        }
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
                                         struct sk_buff *skb)
{
        return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
                               struct sk_buff *skb,
                               const struct mlxsw_tx_info *tx_info)
{
        struct sk_buff *trans_skb;
        int n_retry;
        int err;

        n_retry = 0;
retry:
        /* We copy the EMAD to a new skb, since we might need
         * to retransmit it in case of failure.
         */
        trans_skb = skb_copy(skb, GFP_KERNEL);
        if (!trans_skb) {
                err = -ENOMEM;
                goto out;
        }

        err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
        if (!err) {
                struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

                err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
                if (err)
                        dev_kfree_skb(resp_skb);
                if (!err || err != -EAGAIN)
                        goto out;
        }
        if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
                goto retry;

out:
        dev_kfree_skb(skb);
        mlxsw_core->emad.tid++;
        return err;
}

static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
                                        void *priv)
{
        struct mlxsw_core *mlxsw_core = priv;

        if (mlxsw_emad_is_resp(skb) &&
            mlxsw_core->emad.trans_active &&
            mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
                mlxsw_core->emad.resp_skb = skb;
                mlxsw_core->emad.trans_active = false;
                wake_up(&mlxsw_core->emad.wait);
        } else {
                dev_kfree_skb(skb);
        }
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
        .func = mlxsw_emad_rx_listener_func,
        .local_port = MLXSW_PORT_DONT_CARE,
        .trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
        int err;

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
        err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
        if (err)
                return err;

        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
                            MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_TRAP_ID_ETHEMAD);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
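
/* EMAD bring-up below is ordered so that register access keeps working
 * throughout: the RX listener is installed first, then the EMAD trap is
 * configured (still over the command interface, since use_emad is not
 * yet set), and only then does emad.use_emad flip
 * mlxsw_core_reg_access() over to the EMAD path.
 */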
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
        int err;

        /* Set the upper 32 bits of the transaction ID field to a random
         * number. This allows us to discard EMADs addressed to other
         * devices.
         */
        get_random_bytes(&mlxsw_core->emad.tid, 4);
        mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

        init_waitqueue_head(&mlxsw_core->emad.wait);
        mlxsw_core->emad.trans_active = false;
        mutex_init(&mlxsw_core->emad.lock);

        err = mlxsw_core_rx_listener_register(mlxsw_core,
                                              &mlxsw_emad_rx_listener,
                                              mlxsw_core);
        if (err)
                return err;

        err = mlxsw_emad_traps_set(mlxsw_core);
        if (err)
                goto err_emad_trap_set;

        mlxsw_core->emad.use_emad = true;

        return 0;

err_emad_trap_set:
        mlxsw_core_rx_listener_unregister(mlxsw_core,
                                          &mlxsw_emad_rx_listener,
                                          mlxsw_core);
        return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
        char hpkt_pl[MLXSW_REG_HPKT_LEN];

        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
                            MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_TRAP_ID_ETHEMAD);
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

        mlxsw_core_rx_listener_unregister(mlxsw_core,
                                          &mlxsw_emad_rx_listener,
                                          mlxsw_core);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
                                        u16 reg_len)
{
        struct sk_buff *skb;
        u16 emad_len;

        emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
                    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
                    sizeof(u32) + mlxsw_core->driver->txhdr_len);
        if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
                return NULL;

        skb = netdev_alloc_skb(NULL, emad_len);
        if (!skb)
                return NULL;
        memset(skb->data, 0, emad_len);
        skb_reserve(skb, emad_len);

        return skb;
}

/*****************
 * Core functions
 *****************/
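
/* Aggregating the per-CPU counters below uses the usual u64_stats
 * pattern: re-read a CPU's 64-bit counters until the seqcount is stable,
 * so that 32-bit hosts never observe torn values; the u32 drop counters
 * need no such protection and are summed directly.
 */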
static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
        struct mlxsw_core *mlxsw_core = file->private;
        struct mlxsw_core_pcpu_stats *p;
        u64 rx_packets, rx_bytes;
        u64 tmp_rx_packets, tmp_rx_bytes;
        u32 rx_dropped, rx_invalid;
        unsigned int start;
        int i;
        int j;
        static const char hdr[] =
                "     NUM   RX_PACKETS     RX_BYTES  RX_DROPPED\n";

        seq_printf(file, hdr);
        for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
                rx_packets = 0;
                rx_bytes = 0;
                rx_dropped = 0;
                for_each_possible_cpu(j) {
                        p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                        do {
                                start = u64_stats_fetch_begin(&p->syncp);
                                tmp_rx_packets = p->trap_rx_packets[i];
                                tmp_rx_bytes = p->trap_rx_bytes[i];
                        } while (u64_stats_fetch_retry(&p->syncp, start));

                        rx_packets += tmp_rx_packets;
                        rx_bytes += tmp_rx_bytes;
                        rx_dropped += p->trap_rx_dropped[i];
                }
                seq_printf(file, "trap %3d %12llu %12llu %10u\n",
                           i, rx_packets, rx_bytes, rx_dropped);
        }
        rx_invalid = 0;
        for_each_possible_cpu(j) {
                p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                rx_invalid += p->trap_rx_invalid;
        }
        seq_printf(file, "trap INV                           %10u\n",
                   rx_invalid);

        for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
                rx_packets = 0;
                rx_bytes = 0;
                rx_dropped = 0;
                for_each_possible_cpu(j) {
                        p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                        do {
                                start = u64_stats_fetch_begin(&p->syncp);
                                tmp_rx_packets = p->port_rx_packets[i];
                                tmp_rx_bytes = p->port_rx_bytes[i];
                        } while (u64_stats_fetch_retry(&p->syncp, start));

                        rx_packets += tmp_rx_packets;
                        rx_bytes += tmp_rx_bytes;
                        rx_dropped += p->port_rx_dropped[i];
                }
                seq_printf(file, "port %3d %12llu %12llu %10u\n",
                           i, rx_packets, rx_bytes, rx_dropped);
        }
        rx_invalid = 0;
        for_each_possible_cpu(j) {
                p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                rx_invalid += p->port_rx_invalid;
        }
        seq_printf(file, "port INV                           %10u\n",
                   rx_invalid);
        return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
        struct mlxsw_core *mlxsw_core = inode->i_private;

        return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
        .owner = THIS_MODULE,
        .open = mlxsw_core_rx_stats_dbg_open,
        .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek
};

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
                                    const char *buf, size_t size)
{
        __be32 *m = (__be32 *) buf;
        int i;
        int count = size / sizeof(__be32);

        for (i = count - 1; i >= 0; i--)
                if (m[i])
                        break;
        i++;
        count = i ? i : 1;
        for (i = 0; i < count; i += 4)
                dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
                        i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
                        be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
        spin_lock(&mlxsw_core_driver_list_lock);
        list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
        spin_unlock(&mlxsw_core_driver_list_lock);
        return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
        spin_lock(&mlxsw_core_driver_list_lock);
        list_del(&mlxsw_driver->list);
        spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
                if (strcmp(mlxsw_driver->kind, kind) == 0)
                        return mlxsw_driver;
        }
        return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        spin_lock(&mlxsw_core_driver_list_lock);
        mlxsw_driver = __driver_find(kind);
        if (!mlxsw_driver) {
                spin_unlock(&mlxsw_core_driver_list_lock);
                request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
                spin_lock(&mlxsw_core_driver_list_lock);
                mlxsw_driver = __driver_find(kind);
        }
        if (mlxsw_driver) {
                if (!try_module_get(mlxsw_driver->owner))
                        mlxsw_driver = NULL;
        }

        spin_unlock(&mlxsw_core_driver_list_lock);
        return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        spin_lock(&mlxsw_core_driver_list_lock);
        mlxsw_driver = __driver_find(kind);
        spin_unlock(&mlxsw_core_driver_list_lock);
        if (!mlxsw_driver)
                return;
        module_put(mlxsw_driver->owner);
}
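
/* A device driver plugs into the core by registering a struct
 * mlxsw_driver keyed by its "kind" (a sketch; the names are
 * illustrative and only the fields used in this file are shown):
 *
 *      static struct mlxsw_driver my_driver = {
 *              .kind             = "mlxsw_mydevice",
 *              .owner            = THIS_MODULE,
 *              .priv_size        = sizeof(struct my_priv),
 *              .init             = my_init,
 *              .fini             = my_fini,
 *              .txhdr_construct  = my_txhdr_construct,
 *              .txhdr_len        = MY_TXHDR_LEN,
 *              .profile          = &my_config_profile,
 *      };
 *
 *      mlxsw_core_driver_register(&my_driver);
 *
 * mlxsw_core_driver_get() then looks the driver up by kind when a bus
 * device is registered, autoloading its module via request_module().
 */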
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
        const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

        mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
                                                 mlxsw_core_dbg_root);
        if (!mlxsw_core->dbg_dir)
                return -ENOMEM;
        debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
                            mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
        mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
        mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
        debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
                            &mlxsw_core->dbg.vsd_blob);
        mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
        mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
        debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
                            &mlxsw_core->dbg.psid_blob);
        return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
        debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
                                   const struct mlxsw_bus *mlxsw_bus,
                                   void *bus_priv)
{
        const char *device_kind = mlxsw_bus_info->device_kind;
        struct mlxsw_core *mlxsw_core;
        struct mlxsw_driver *mlxsw_driver;
        size_t alloc_size;
        int err;

        mlxsw_driver = mlxsw_core_driver_get(device_kind);
        if (!mlxsw_driver)
                return -EINVAL;
        alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
        mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
        if (!mlxsw_core) {
                err = -ENOMEM;
                goto err_core_alloc;
        }

        INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
        INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
        mlxsw_core->driver = mlxsw_driver;
        mlxsw_core->bus = mlxsw_bus;
        mlxsw_core->bus_priv = bus_priv;
        mlxsw_core->bus_info = mlxsw_bus_info;

        mlxsw_core->pcpu_stats =
                netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
        if (!mlxsw_core->pcpu_stats) {
                err = -ENOMEM;
                goto err_alloc_stats;
        }

        err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
        if (err)
                goto err_bus_init;

        err = mlxsw_emad_init(mlxsw_core);
        if (err)
                goto err_emad_init;

        err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
                                 mlxsw_bus_info);
        if (err)
                goto err_driver_init;

        err = mlxsw_core_debugfs_init(mlxsw_core);
        if (err)
                goto err_debugfs_init;

        return 0;

err_debugfs_init:
        mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
        mlxsw_emad_fini(mlxsw_core);
err_emad_init:
        mlxsw_bus->fini(bus_priv);
err_bus_init:
        free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
        kfree(mlxsw_core);
err_core_alloc:
        mlxsw_core_driver_put(device_kind);
        return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
        const char *device_kind = mlxsw_core->bus_info->device_kind;

        mlxsw_core_debugfs_fini(mlxsw_core);
        mlxsw_core->driver->fini(mlxsw_core->driver_priv);
        mlxsw_emad_fini(mlxsw_core);
        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
        free_percpu(mlxsw_core->pcpu_stats);
        kfree(mlxsw_core);
        mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
        return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
                                  const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

        return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
                                                  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
                            const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

        return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
                                             tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
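
/* RX listeners below are keyed by (func, local_port, trap_id) and are
 * matched against incoming traps in mlxsw_core_skb_receive(). Typical
 * driver-side usage (a sketch; the listener function and trap ID are
 * illustrative, not defined here):
 *
 *      static const struct mlxsw_rx_listener rxl = {
 *              .func       = my_rx_listener_func,
 *              .local_port = MLXSW_PORT_DONT_CARE,
 *              .trap_id    = MLXSW_TRAP_ID_FOO,
 *      };
 *      err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, priv);
 */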
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
                                   const struct mlxsw_rx_listener *rxl_b)
{
        return (rxl_a->func == rxl_b->func &&
                rxl_a->local_port == rxl_b->local_port &&
                rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
                        const struct mlxsw_rx_listener *rxl,
                        void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
                if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
                    rxl_item->priv == priv)
                        return rxl_item;
        }
        return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
                                    const struct mlxsw_rx_listener *rxl,
                                    void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
        if (rxl_item)
                return -EEXIST;
        rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
        if (!rxl_item)
                return -ENOMEM;
        rxl_item->rxl = *rxl;
        rxl_item->priv = priv;

        list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
        return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
                                       const struct mlxsw_rx_listener *rxl,
                                       void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
        if (!rxl_item)
                return;
        list_del_rcu(&rxl_item->list);
        synchronize_rcu();
        kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
                                           void *priv)
{
        struct mlxsw_event_listener_item *event_listener_item = priv;
        struct mlxsw_reg_info reg;
        char *payload;
        char *op_tlv = mlxsw_emad_op_tlv(skb);
        char *reg_tlv = mlxsw_emad_reg_tlv(skb);

        reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
        reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
        payload = mlxsw_emad_reg_payload(op_tlv);
        event_listener_item->el.func(&reg, payload, event_listener_item->priv);
        dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
                                      const struct mlxsw_event_listener *el_b)
{
        return (el_a->func == el_b->func &&
                el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
                           const struct mlxsw_event_listener *el,
                           void *priv)
{
        struct mlxsw_event_listener_item *el_item;

        list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
                if (__is_event_listener_equal(&el_item->el, el) &&
                    el_item->priv == priv)
                        return el_item;
        }
        return NULL;
}
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
                                       const struct mlxsw_event_listener *el,
                                       void *priv)
{
        int err;
        struct mlxsw_event_listener_item *el_item;
        const struct mlxsw_rx_listener rxl = {
                .func = mlxsw_core_event_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = el->trap_id,
        };

        el_item = __find_event_listener_item(mlxsw_core, el, priv);
        if (el_item)
                return -EEXIST;
        el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
        if (!el_item)
                return -ENOMEM;
        el_item->el = *el;
        el_item->priv = priv;

        err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
        if (err)
                goto err_rx_listener_register;

        /* No reason to save item if we did not manage to register an RX
         * listener for it.
         */
        list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

        return 0;

err_rx_listener_register:
        kfree(el_item);
        return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
                                          const struct mlxsw_event_listener *el,
                                          void *priv)
{
        struct mlxsw_event_listener_item *el_item;
        const struct mlxsw_rx_listener rxl = {
                .func = mlxsw_core_event_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = el->trap_id,
        };

        el_item = __find_event_listener_item(mlxsw_core, el, priv);
        if (!el_item)
                return;
        mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
        list_del(&el_item->list);
        kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
                                      const struct mlxsw_reg_info *reg,
                                      char *payload,
                                      enum mlxsw_core_reg_access_type type)
{
        int err;
        char *op_tlv;
        struct sk_buff *skb;
        struct mlxsw_tx_info tx_info = {
                .local_port = MLXSW_PORT_CPU_PORT,
                .is_emad = true,
        };

        skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
        if (!skb)
                return -ENOMEM;

        mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
        mlxsw_core->driver->txhdr_construct(skb, &tx_info);

        dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
                mlxsw_core->emad.tid);
        mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

        err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
        if (!err) {
                op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
                memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
                       reg->len);

                dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
                        mlxsw_core->emad.tid - 1);
                mlxsw_core_buf_dump_dbg(mlxsw_core,
                                        mlxsw_core->emad.resp_skb->data,
                                        mlxsw_core->emad.resp_skb->len);

                dev_kfree_skb(mlxsw_core->emad.resp_skb);
        }

        return err;
}

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
                                     const struct mlxsw_reg_info *reg,
                                     char *payload,
                                     enum mlxsw_core_reg_access_type type)
{
        int err, n_retry;
        char *in_mbox, *out_mbox, *tmp;

        in_mbox = mlxsw_cmd_mbox_alloc();
        if (!in_mbox)
                return -ENOMEM;

        out_mbox = mlxsw_cmd_mbox_alloc();
        if (!out_mbox) {
                err = -ENOMEM;
                goto free_in_mbox;
        }

        mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
        tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
        mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

        n_retry = 0;
retry:
        err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
        if (!err) {
                err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
                if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
                        goto retry;
        }

        if (!err)
                memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
                       reg->len);

        mlxsw_core->emad.tid++;
        mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
        mlxsw_cmd_mbox_free(in_mbox);
        return err;
}
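
/* Note that the command-interface path above reuses the EMAD OP/register
 * TLV encoding: the same TLVs are packed into the firmware input mailbox
 * for the ACCESS_REG command instead of being sent as an Ethernet frame.
 */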
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type)
{
        u64 cur_tid;
        int err;

        if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
                dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
                        reg->id, mlxsw_reg_id_str(reg->id),
                        mlxsw_core_reg_access_type_str(type));
                return -EINTR;
        }

        cur_tid = mlxsw_core->emad.tid;
        dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
                cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
                mlxsw_core_reg_access_type_str(type));

        /* During initialization EMAD interface is not available to us,
         * so we default to command interface. We switch to EMAD interface
         * after setting the appropriate traps.
         */
        if (!mlxsw_core->emad.use_emad)
                err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
                                                payload, type);
        else
                err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
                                                 payload, type);

        if (err)
                dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
                        cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
                        mlxsw_core_reg_access_type_str(type));

        mutex_unlock(&mlxsw_core->emad.lock);
        return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
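
/* Typical usage of the accessors above, mirroring mlxsw_emad_traps_set():
 * pack a payload buffer with the register's helper from reg.h, then
 * write (or query) it:
 *
 *      char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *
 *      mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 *                          MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
 *                          MLXSW_TRAP_ID_ETHEMAD);
 *      err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 */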
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                            struct mlxsw_rx_info *rx_info)
{
        struct mlxsw_rx_listener_item *rxl_item;
        const struct mlxsw_rx_listener *rxl;
        struct mlxsw_core_pcpu_stats *pcpu_stats;
        u8 local_port = rx_info->sys_port;
        bool found = false;

        dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
                            __func__, rx_info->sys_port, rx_info->trap_id);

        if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
            (local_port >= MLXSW_PORT_MAX_PORTS))
                goto drop;

        rcu_read_lock();
        list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
                rxl = &rxl_item->rxl;
                if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
                     rxl->local_port == local_port) &&
                    rxl->trap_id == rx_info->trap_id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        if (!found)
                goto drop;

        pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->port_rx_packets[local_port]++;
        pcpu_stats->port_rx_bytes[local_port] += skb->len;
        pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
        pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        rxl->func(skb, local_port, rxl_item->priv);
        return;

drop:
        if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
                this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
        else
                this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
        if (local_port >= MLXSW_PORT_MAX_PORTS)
                this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
        else
                this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
                   u32 in_mod, bool out_mbox_direct,
                   char *in_mbox, size_t in_mbox_size,
                   char *out_mbox, size_t out_mbox_size)
{
        u8 status;
        int err;

        BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
        if (!mlxsw_core->bus->cmd_exec)
                return -EOPNOTSUPP;

        dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
        if (in_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
        }

        err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
                                        opcode_mod, in_mod, out_mbox_direct,
                                        in_mbox, in_mbox_size,
                                        out_mbox, out_mbox_size, &status);

        if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod, status, mlxsw_cmd_status_str(status));
        } else if (err == -ETIMEDOUT) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod);
        }

        if (!err && out_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
        }
        return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
        mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
        if (!mlxsw_core_dbg_root)
                return -ENOMEM;
        return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
        debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");