/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/if_ether.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>

#include <dev/mlx5/device.h>
#include <dev/mlx5/doorbell.h>
#include <dev/mlx5/srq.h>

#define MLX5_QCOUNTER_SETS_NETDEV 64
#define MLX5_MAX_NUMBER_OF_VFS 128

enum {
        MLX5_BOARD_ID_LEN = 64,
        MLX5_MAX_NAME_LEN = 16,
};

enum {
        MLX5_CMD_TIMEOUT_MSEC = 8 * 60 * 1000,
        MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
        CMD_OWNER_SW = 0x0,
        CMD_OWNER_HW = 0x1,
        CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
        MLX5_SQP_SMI = 0,
        MLX5_SQP_GSI = 1,
        MLX5_SQP_IEEE_1588 = 2,
        MLX5_SQP_SNIFFER = 3,
        MLX5_SQP_SYNC_UMR = 4,
};

enum {
        MLX5_MAX_PORTS = 2,
};

enum {
        MLX5_EQ_VEC_PAGES = 0,
        MLX5_EQ_VEC_CMD = 1,
        MLX5_EQ_VEC_ASYNC = 2,
        MLX5_EQ_VEC_COMP_BASE,
};

enum {
        MLX5_MAX_IRQ_NAME = 32
};

enum {
        MLX5_ATOMIC_MODE_OFF = 16,
        MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF,
        MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF,
};

enum {
        MLX5_ATOMIC_MODE_DCT_OFF = 20,
        MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF,
};

enum {
        MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
        MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
        MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2,
        MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3,
};
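/*
 * Illustrative only (not part of this header): testing a device's
 * atomic-operations capability word against the MLX5_ATOMIC_OPS_*
 * flags above. The "atomic_ops" argument stands in for a value read
 * from the HCA capabilities.
 */
static inline int mlx5_example_supports_masked_fetch_add(u32 atomic_ops)
{
        return ((atomic_ops & MLX5_ATOMIC_OPS_MASKED_FETCH_ADD) != 0);
}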
enum {
        MLX5_REG_QETCR = 0x4005,
        MLX5_REG_QPDP = 0x4007,
        MLX5_REG_QTCT = 0x400A,
        MLX5_REG_QHLL = 0x4016,
        MLX5_REG_DCBX_PARAM = 0x4020,
        MLX5_REG_DCBX_APP = 0x4021,
        MLX5_REG_PCAP = 0x5001,
        MLX5_REG_PMTU = 0x5003,
        MLX5_REG_PTYS = 0x5004,
        MLX5_REG_PAOS = 0x5006,
        MLX5_REG_PFCC = 0x5007,
        MLX5_REG_PPCNT = 0x5008,
        MLX5_REG_PMAOS = 0x5012,
        MLX5_REG_PUDE = 0x5009,
        MLX5_REG_PPTB = 0x500B,
        MLX5_REG_PBMC = 0x500C,
        MLX5_REG_PMPE = 0x5010,
        MLX5_REG_PELC = 0x500e,
        MLX5_REG_PVLC = 0x500f,
        MLX5_REG_PMLP = 0x5002,
        MLX5_REG_NODE_DESC = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
        MLX5_REG_MCIA = 0x9014,
        MLX5_REG_MPCNT = 0x9051,
};

enum dbg_rsc_type {
        MLX5_DBG_RSC_QP,
        MLX5_DBG_RSC_EQ,
        MLX5_DBG_RSC_CQ,
};

enum {
        MLX5_INTERFACE_PROTOCOL_IB = 0,
        MLX5_INTERFACE_PROTOCOL_ETH = 1,
        MLX5_INTERFACE_NUMBER = 2,
};

struct mlx5_field_desc {
        struct dentry *dent;
        int i;
};

struct mlx5_rsc_debug {
        struct mlx5_core_dev *dev;
        void *object;
        enum dbg_rsc_type type;
        struct dentry *root;
        struct mlx5_field_desc fields[0];
};

enum mlx5_dev_event {
        MLX5_DEV_EVENT_SYS_ERROR,
        MLX5_DEV_EVENT_PORT_UP,
        MLX5_DEV_EVENT_PORT_DOWN,
        MLX5_DEV_EVENT_PORT_INITIALIZED,
        MLX5_DEV_EVENT_LID_CHANGE,
        MLX5_DEV_EVENT_PKEY_CHANGE,
        MLX5_DEV_EVENT_GUID_CHANGE,
        MLX5_DEV_EVENT_CLIENT_REREG,
        MLX5_DEV_EVENT_VPORT_CHANGE,
        MLX5_DEV_EVENT_ERROR_STATE_DCBX,
        MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE,
        MLX5_DEV_EVENT_LOCAL_OPER_CHANGE,
        MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE,
};

enum mlx5_port_status {
        MLX5_PORT_UP = 1 << 0,
        MLX5_PORT_DOWN = 1 << 1,
};

enum mlx5_link_mode {
        MLX5_1000BASE_CX_SGMII = 0,
        MLX5_1000BASE_KX = 1,
        MLX5_10GBASE_CX4 = 2,
        MLX5_10GBASE_KX4 = 3,
        MLX5_10GBASE_KR = 4,
        MLX5_20GBASE_KR2 = 5,
        MLX5_40GBASE_CR4 = 6,
        MLX5_40GBASE_KR4 = 7,
        MLX5_56GBASE_R4 = 8,
        MLX5_10GBASE_CR = 12,
        MLX5_10GBASE_SR = 13,
        MLX5_10GBASE_ER = 14,
        MLX5_40GBASE_SR4 = 15,
        MLX5_40GBASE_LR4 = 16,
        MLX5_100GBASE_CR4 = 20,
        MLX5_100GBASE_SR4 = 21,
        MLX5_100GBASE_KR4 = 22,
        MLX5_100GBASE_LR4 = 23,
        MLX5_100BASE_TX = 24,
        MLX5_1000BASE_T = 25,
        MLX5_10GBASE_T = 26,
        MLX5_25GBASE_CR = 27,
        MLX5_25GBASE_KR = 28,
        MLX5_25GBASE_SR = 29,
        MLX5_50GBASE_CR2 = 30,
        MLX5_50GBASE_KR2 = 31,
        MLX5_LINK_MODES_NUMBER,
};

enum {
        MLX5_VSC_SPACE_SUPPORTED = 0x1,
        MLX5_VSC_SPACE_OFFSET = 0x4,
        MLX5_VSC_COUNTER_OFFSET = 0x8,
        MLX5_VSC_SEMA_OFFSET = 0xC,
        MLX5_VSC_ADDR_OFFSET = 0x10,
        MLX5_VSC_DATA_OFFSET = 0x14,
        MLX5_VSC_MAX_RETRIES = 0x1000,
};

#define MLX5_PROT_MASK(link_mode) (1 << link_mode)
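/*
 * Illustrative only: a PTYS capability word is tested against a link
 * mode with MLX5_PROT_MASK. The eth_proto_cap value would normally
 * come from a PTYS register query (see struct mlx5_ptys_reg below);
 * here it is just a parameter.
 */
static inline int mlx5_example_supports_100gbase_cr4(u32 eth_proto_cap)
{
        return ((eth_proto_cap & MLX5_PROT_MASK(MLX5_100GBASE_CR4)) != 0);
}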
struct mlx5_uuar_info {
        struct mlx5_uar *uars;
        int num_uars;
        int num_low_latency_uuars;
        unsigned long *bitmap;
        unsigned int *count;
        struct mlx5_bf *bfs;

        /*
         * protect uuar allocation data structs
         */
        struct mutex lock;
        u32 ver;
};

struct mlx5_bf {
        void __iomem *reg;
        int buf_size;
        struct mlx5_uar *uar;
        unsigned long offset;
        int need_lock;
        /* protect blue flame buffer selection when needed
         */
        spinlock_t lock;

        /* serialize 64 bit writes when done as two 32 bit accesses
         */
        spinlock_t lock32;
        int uuarn;
};

struct mlx5_cmd_first {
        __be32 data[4];
};

struct cache_ent;
struct mlx5_fw_page {
        union {
                struct rb_node rb_node;
                struct list_head list;
        };
        struct mlx5_cmd_first first;
        struct mlx5_core_dev *dev;
        bus_dmamap_t dma_map;
        bus_addr_t dma_addr;
        void *virt_addr;
        struct cache_ent *cache;
        u32 numpages;
        u16 load_done;
#define MLX5_LOAD_ST_NONE 0
#define MLX5_LOAD_ST_SUCCESS 1
#define MLX5_LOAD_ST_FAILURE 2
        u16 func_id;
};
#define mlx5_cmd_msg mlx5_fw_page

struct mlx5_cmd_debug {
        struct dentry *dbg_root;
        struct dentry *dbg_in;
        struct dentry *dbg_out;
        struct dentry *dbg_outlen;
        struct dentry *dbg_status;
        struct dentry *dbg_run;
        void *in_msg;
        void *out_msg;
        u8 status;
        u16 inlen;
        u16 outlen;
};

struct cache_ent {
        /* protect block chain allocations
         */
        spinlock_t lock;
        struct list_head head;
};

struct cmd_msg_cache {
        struct cache_ent large;
        struct cache_ent med;
};

struct mlx5_traffic_counter {
        u64 packets;
        u64 octets;
};

struct mlx5_cmd_stats {
        u64 sum;
        u64 n;
        struct dentry *root;
        struct dentry *avg;
        struct dentry *count;
        /* protect command average calculations */
        spinlock_t lock;
};

struct mlx5_cmd {
        struct mlx5_fw_page *cmd_page;
        bus_dma_tag_t dma_tag;
        struct sx dma_sx;
        struct mtx dma_mtx;
#define MLX5_DMA_OWNED(dev) mtx_owned(&(dev)->cmd.dma_mtx)
#define MLX5_DMA_LOCK(dev) mtx_lock(&(dev)->cmd.dma_mtx)
#define MLX5_DMA_UNLOCK(dev) mtx_unlock(&(dev)->cmd.dma_mtx)
        struct cv dma_cv;
#define MLX5_DMA_DONE(dev) cv_broadcast(&(dev)->cmd.dma_cv)
#define MLX5_DMA_WAIT(dev) cv_wait(&(dev)->cmd.dma_cv, &(dev)->cmd.dma_mtx)
        void *cmd_buf;
        dma_addr_t dma;
        u16 cmdif_rev;
        u8 log_sz;
        u8 log_stride;
        int max_reg_cmds;
        int events;
        u32 __iomem *vector;

        /* protect command queue allocations
         */
        spinlock_t alloc_lock;

        /* protect token allocations
         */
        spinlock_t token_lock;
        u8 token;
        unsigned long bitmask;
        char wq_name[MLX5_CMD_WQ_MAX_NAME];
        struct workqueue_struct *wq;
        struct semaphore sem;
        struct semaphore pages_sem;
        int mode;
        struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
        struct mlx5_cmd_debug dbg;
        struct cmd_msg_cache cache;
        int checksum_disabled;
        struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
        int gid_table_len;
        int pkey_table_len;
        u8 ext_port_cap;
};

struct mlx5_buf {
        bus_dma_tag_t dma_tag;
        bus_dmamap_t dma_map;
        struct mlx5_core_dev *dev;
        struct {
                void *buf;
        } direct;
        u64 *page_list;
        int npages;
        int size;
        u8 page_shift;
        u8 load_done;
};

struct mlx5_eq {
        struct mlx5_core_dev *dev;
        __be32 __iomem *doorbell;
        u32 cons_index;
        struct mlx5_buf buf;
        int size;
        u8 irqn;
        u8 eqn;
        int nent;
        u64 mask;
        struct list_head list;
        int index;
        struct mlx5_rsc_debug *dbg;
};

struct mlx5_core_psv {
        u32 psv_idx;
        struct psv_layout {
                u32 pd;
                u16 syndrome;
                u16 reserved;
                u16 bg;
                u16 app_tag;
                u32 ref_tag;
        } psv;
};

struct mlx5_core_sig_ctx {
        struct mlx5_core_psv psv_memory;
        struct mlx5_core_psv psv_wire;
#if (__FreeBSD_version >= 1100000)
        struct ib_sig_err err_item;
#endif
        bool sig_status_checked;
        bool sig_err_exists;
        u32 sigerr_count;
};

struct mlx5_core_mr {
        u64 iova;
        u64 size;
        u32 key;
        u32 pd;
};

enum mlx5_res_type {
        MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
        MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
        MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
        MLX5_RES_SRQ = 3,
        MLX5_RES_XSRQ = 4,
        MLX5_RES_DCT = 5,
};

struct mlx5_core_rsc_common {
        enum mlx5_res_type res;
        atomic_t refcount;
        struct completion free;
};

struct mlx5_core_srq {
        struct mlx5_core_rsc_common common; /* must be first */
        u32 srqn;
        int max;
        int max_gs;
        int max_avail_gather;
        int wqe_shift;
        void (*event)(struct mlx5_core_srq *, int);
        atomic_t refcount;
        struct completion free;
};
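/*
 * Illustrative only: an SRQ event callback matching the "event" hook
 * in struct mlx5_core_srq above. The core invokes the hook with the
 * firmware event type; the body here is hypothetical.
 */
static inline void mlx5_example_srq_event(struct mlx5_core_srq *srq,
                                          int event_type)
{
        printf("mlx5: SRQ 0x%x got event 0x%x\n", srq->srqn, event_type);
}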
struct mlx5_eq_table {
        void __iomem *update_ci;
        void __iomem *update_arm_ci;
        struct list_head comp_eqs_list;
        struct mlx5_eq pages_eq;
        struct mlx5_eq async_eq;
        struct mlx5_eq cmd_eq;
        int num_comp_vectors;
        /* protect EQs list
         */
        spinlock_t lock;
};

struct mlx5_uar {
        u32 index;
        void __iomem *bf_map;
        void __iomem *map;
};

struct mlx5_core_health {
        struct mlx5_health_buffer __iomem *health;
        __be32 __iomem *health_counter;
        struct timer_list timer;
        u32 prev;
        int miss_counter;
        u32 fatal_error;
        /* wq spinlock to synchronize draining */
        spinlock_t wq_lock;
        struct workqueue_struct *wq;
        unsigned long flags;
        struct work_struct work;
        struct delayed_work recover_work;
};

#define MLX5_CQ_LINEAR_ARRAY_SIZE 1024

struct mlx5_cq_linear_array_entry {
        spinlock_t lock;
        struct mlx5_core_cq * volatile cq;
};

struct mlx5_cq_table {
        /* protect radix tree
         */
        spinlock_t lock;
        struct radix_tree_root tree;
        struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE];
};

struct mlx5_qp_table {
        /* protect radix tree
         */
        spinlock_t lock;
        struct radix_tree_root tree;
};

struct mlx5_srq_table {
        /* protect radix tree
         */
        spinlock_t lock;
        struct radix_tree_root tree;
};

struct mlx5_mr_table {
        /* protect radix tree
         */
        spinlock_t lock;
        struct radix_tree_root tree;
};

struct mlx5_irq_info {
        char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_priv {
        char name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table eq_table;
        struct msix_entry *msix_arr;
        struct mlx5_irq_info *irq_info;
        struct mlx5_uuar_info uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

        struct io_mapping *bf_mapping;

        /* pages stuff */
        struct workqueue_struct *pg_wq;
        struct rb_root page_root;
        s64 fw_pages;
        atomic_t reg_pages;
        s64 pages_per_func[MLX5_MAX_NUMBER_OF_VFS];
        struct mlx5_core_health health;

        struct mlx5_srq_table srq_table;

        /* start: qp stuff */
        struct mlx5_qp_table qp_table;
        struct dentry *qp_debugfs;
        struct dentry *eq_debugfs;
        struct dentry *cq_debugfs;
        struct dentry *cmdif_debugfs;
        /* end: qp stuff */

        /* start: cq stuff */
        struct mlx5_cq_table cq_table;
        /* end: cq stuff */

        /* start: mr stuff */
        struct mlx5_mr_table mr_table;
        /* end: mr stuff */

        /* start: alloc stuff */
        int numa_node;

        struct mutex pgdir_mutex;
        struct list_head pgdir_list;
        /* end: alloc stuff */
        struct dentry *dbg_root;

        /* protect mkey key part */
        spinlock_t mkey_lock;
        u8 mkey_key;

        struct list_head dev_list;
        struct list_head ctx_list;
        spinlock_t ctx_lock;
        unsigned long pci_dev_data;
};

enum mlx5_device_state {
        MLX5_DEVICE_STATE_UP,
        MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
        MLX5_INTERFACE_STATE_DOWN = BIT(0),
        MLX5_INTERFACE_STATE_UP = BIT(1),
        MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};

enum mlx5_pci_status {
        MLX5_PCI_STATUS_DISABLED,
        MLX5_PCI_STATUS_ENABLED,
};

struct mlx5_special_contexts {
        int resd_lkey;
};

struct mlx5_flow_root_namespace;
struct mlx5_dump_data;
struct mlx5_core_dev {
        struct pci_dev *pdev;
        /* sync pci state */
        struct mutex pci_status_mutex;
        enum mlx5_pci_status pci_status;
        char board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_cmd cmd;
        struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
        u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        phys_addr_t iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state state;
        /* sync interface state */
        struct mutex intf_state_mutex;
        unsigned long intf_state;
        void (*event)(struct mlx5_core_dev *dev,
                      enum mlx5_dev_event event,
                      unsigned long param);
        struct mlx5_priv priv;
        struct mlx5_profile *profile;
        atomic_t num_qps;
        u32 vsc_addr;
        u32 issi;
        struct mlx5_special_contexts special_contexts;
        unsigned int module_status[MLX5_MAX_PORTS];
        struct mlx5_flow_root_namespace *root_ns;
        struct mlx5_flow_root_namespace *fdb_root_ns;
        struct mlx5_flow_root_namespace *esw_egress_root_ns;
        struct mlx5_flow_root_namespace *esw_ingress_root_ns;
        struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
        struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
        u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
        struct mlx5_dump_data *dump_data;
        u32 vsec_addr;
};
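/*
 * Usage sketch (hypothetical helper, not part of the driver): the
 * MLX5_DMA_* macros defined in struct mlx5_cmd follow the classic
 * mutex-plus-condition-variable pattern. A waiter blocks until a
 * completion path sets the condition and calls MLX5_DMA_DONE():
 */
static inline void mlx5_example_wait_load_done(struct mlx5_core_dev *dev,
                                               struct mlx5_fw_page *fwp)
{
        MLX5_DMA_LOCK(dev);
        while (fwp->load_done == MLX5_LOAD_ST_NONE)
                MLX5_DMA_WAIT(dev);
        MLX5_DMA_UNLOCK(dev);
}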
enum {
        MLX5_WOL_DISABLE = 0,
        MLX5_WOL_SECURED_MAGIC = 1 << 1,
        MLX5_WOL_MAGIC = 1 << 2,
        MLX5_WOL_ARP = 1 << 3,
        MLX5_WOL_BROADCAST = 1 << 4,
        MLX5_WOL_MULTICAST = 1 << 5,
        MLX5_WOL_UNICAST = 1 << 6,
        MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};

struct mlx5_db {
        __be32 *db;
        union {
                struct mlx5_db_pgdir *pgdir;
                struct mlx5_ib_user_db_page *user_page;
        } u;
        dma_addr_t dma;
        int index;
};

struct mlx5_net_counters {
        u64 packets;
        u64 octets;
};

struct mlx5_ptys_reg {
        u8 an_dis_admin;
        u8 an_dis_ap;
        u8 local_port;
        u8 proto_mask;
        u32 eth_proto_cap;
        u16 ib_link_width_cap;
        u16 ib_proto_cap;
        u32 eth_proto_admin;
        u16 ib_link_width_admin;
        u16 ib_proto_admin;
        u32 eth_proto_oper;
        u16 ib_link_width_oper;
        u16 ib_proto_oper;
        u32 eth_proto_lp_advertise;
};

struct mlx5_pvlc_reg {
        u8 local_port;
        u8 vl_hw_cap;
        u8 vl_admin;
        u8 vl_operational;
};

struct mlx5_pmtu_reg {
        u8 local_port;
        u16 max_mtu;
        u16 admin_mtu;
        u16 oper_mtu;
};

struct mlx5_vport_counters {
        struct mlx5_net_counters received_errors;
        struct mlx5_net_counters transmit_errors;
        struct mlx5_net_counters received_ib_unicast;
        struct mlx5_net_counters transmitted_ib_unicast;
        struct mlx5_net_counters received_ib_multicast;
        struct mlx5_net_counters transmitted_ib_multicast;
        struct mlx5_net_counters received_eth_broadcast;
        struct mlx5_net_counters transmitted_eth_broadcast;
        struct mlx5_net_counters received_eth_unicast;
        struct mlx5_net_counters transmitted_eth_unicast;
        struct mlx5_net_counters received_eth_multicast;
        struct mlx5_net_counters transmitted_eth_multicast;
};

enum {
        MLX5_DB_PER_PAGE = MLX5_ADAPTER_PAGE_SIZE / L1_CACHE_BYTES,
};

struct mlx5_core_dct {
        struct mlx5_core_rsc_common common; /* must be first */
        void (*event)(struct mlx5_core_dct *, int);
        int dctn;
        struct completion drained;
        struct mlx5_rsc_debug *dbg;
        int pid;
};

enum {
        MLX5_COMP_EQ_SIZE = 1024,
};

enum {
        MLX5_PTYS_IB = 1 << 0,
        MLX5_PTYS_EN = 1 << 2,
};

struct mlx5_db_pgdir {
        struct list_head list;
        DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
        struct mlx5_fw_page *fw_page;
        __be32 *db_page;
        dma_addr_t db_dma;
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

struct mlx5_cmd_work_ent {
        struct mlx5_cmd_msg *in;
        struct mlx5_cmd_msg *out;
        int uin_size;
        void *uout;
        int uout_size;
        mlx5_cmd_cbk_t callback;
        struct delayed_work cb_timeout_work;
        void *context;
        int idx;
        struct completion done;
        struct mlx5_cmd *cmd;
        struct work_struct work;
        struct mlx5_cmd_layout *lay;
        int ret;
        int page_queue;
        u8 status;
        u8 token;
        u64 ts1;
        u64 ts2;
        u16 op;
        u8 busy;
        bool polling;
};

struct mlx5_pas {
        u64 pa;
        u8 log_sz;
};

enum port_state_policy {
        MLX5_POLICY_DOWN = 0,
        MLX5_POLICY_UP = 1,
        MLX5_POLICY_FOLLOW = 2,
        MLX5_POLICY_INVALID = 0xffffffff
};

static inline void *
mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
        return ((char *)buf->direct.buf + offset);
}

extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
        .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
        .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
        if (param > 4) {
                printf("M4_CORE_DRV_NAME: WARN: ""gid table length is zero\n");
                return 0;
        }

        return 8 * (1 << param);
}

static inline void *mlx5_vzalloc(unsigned long size)
{
        void *rtn;

        rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        return rtn;
}
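/*
 * Illustrative only: formatting the firmware revision using the
 * fw_rev_maj()/fw_rev_min()/fw_rev_sub() readers above.
 */
static inline void mlx5_example_fw_version_str(struct mlx5_core_dev *dev,
                                               char *buf, size_t len)
{
        snprintf(buf, len, "%d.%d.%d",
            fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
}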
static inline void *mlx5_vmalloc(unsigned long size)
{
        void *rtn;

        rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!rtn)
                rtn = vmalloc(size);
        return rtn;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
        return key & 0xffffff00u;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
                          void *out, int out_size);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);

#define mlx5_buf_alloc_node(dev, size, direct, buf, node) \
        mlx5_buf_alloc(dev, size, direct, buf)
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
                   struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
                             struct mlx5_core_mr *mkey,
                             u32 *in, int inlen,
                             u32 *out, int outlen,
                             mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                          struct mlx5_core_mr *mr,
                          u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey,
                         u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                             u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
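/*
 * Sketch of the typical mlx5_cmd_exec() calling pattern, using the
 * ALLOC_PD command as an example. It assumes the MLX5_ST_SZ_DW,
 * MLX5_SET and MLX5_GET helpers and the alloc_pd_in/alloc_pd_out
 * layouts from mlx5_ifc (pulled in via dev/mlx5/device.h); this
 * hypothetical helper mirrors what mlx5_core_alloc_pd() does.
 */
static inline int mlx5_example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
        u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
        int err;

        MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err == 0)
                *pdn = MLX5_GET(alloc_pd_out, out, pd);
        return (err);
}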
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
                      u16 opmod, u8 port);
void mlx5_fwp_flush(struct mlx5_fw_page *fwp);
void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp);
struct mlx5_fw_page *mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num);
void mlx5_fwp_free(struct mlx5_fw_page *fwp);
u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset);
void *mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
                                u64 addr);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);

void mlx5_toggle_port_link(struct mlx5_core_dev *dev);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
                       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
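/*
 * Usage sketch (hypothetical): a doorbell record is obtained with
 * mlx5_db_alloc(); db->db then points at the CPU-visible record and
 * db->dma holds its bus address for the hardware context. It is
 * released with mlx5_db_free().
 */
static inline int mlx5_example_db_init(struct mlx5_core_dev *dev,
                                       struct mlx5_db *db)
{
        int err;

        err = mlx5_db_alloc(dev, db);
        if (err)
                return (err);
        db->db[0] = 0;          /* e.g. clear the record before first use */
        return (0);
}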
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
                         int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev);
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode);
int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout);
int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout);
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode);
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
                          struct mlx5_pvlc_reg *pvlc, int write);
int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
                          struct mlx5_ptys_reg *ptys, int write);
int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
                          struct mlx5_pmtu_reg *pmtu, int write);
int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port);
int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port);
int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
                                int priority, int *is_enable);
int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
                                 int priority, int enable);
int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
                                void *out, int out_size);
int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
                                 void *in, int in_size);
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
                                    void *out, int out_size);
int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
                               int in_size);
int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
                                   u8 num_of_samples, u16 sample_index,
                                   void *out, int out_size);
int mlx5_vsc_find_cap(struct mlx5_core_dev *mdev);
int mlx5_vsc_lock(struct mlx5_core_dev *mdev);
void mlx5_vsc_unlock(struct mlx5_core_dev *mdev);
int mlx5_vsc_set_space(struct mlx5_core_dev *mdev, u16 space);
int mlx5_vsc_write(struct mlx5_core_dev *mdev, u32 addr, u32 *data);
int mlx5_vsc_read(struct mlx5_core_dev *mdev, u32 addr, u32 *data);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
        return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
        return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
        return mkey & 0xff;
}

enum {
        MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
        MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
        MAX_MR_CACHE_ENTRIES = 15,
};

struct mlx5_interface {
        void * (*add)(struct mlx5_core_dev *dev);
        void (*remove)(struct mlx5_core_dev *dev, void *context);
        void (*event)(struct mlx5_core_dev *dev, void *context,
                      enum mlx5_dev_event event, unsigned long param);
        void * (*get_dev)(void *context);
        int protocol;
        struct list_head list;
};

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);

struct mlx5_profile {
        u64 mask;
        u8 log_max_qp;
        struct {
                int size;
                int limit;
        } mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
        MLX5_PCI_DEV_IS_VF = 1 << 0,
};

enum {
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
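/*
 * Minimal registration sketch (hypothetical client, not part of this
 * header): a protocol driver fills in struct mlx5_interface and hands
 * it to mlx5_register_interface(); the core then calls ->add() for
 * every mlx5 device and ->remove() on teardown.
 */
static inline void *mlx5_example_add(struct mlx5_core_dev *dev)
{
        return (dev);   /* a real client returns its per-device context */
}

static inline void mlx5_example_remove(struct mlx5_core_dev *dev, void *context)
{
}

static struct mlx5_interface mlx5_example_interface __unused = {
        .add = mlx5_example_add,
        .remove = mlx5_example_remove,
        .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
};
/* mlx5_register_interface(&mlx5_example_interface) at load time;
 * mlx5_unregister_interface(&mlx5_example_interface) at unload. */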
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
        return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

#endif /* MLX5_DRIVER_H */