/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include "opt_ratelimit.h"

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/if_ether.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/idr.h>

#include <dev/mlx5/device.h>
#include <dev/mlx5/doorbell.h>
#include <dev/mlx5/srq.h>

#define MLX5_QCOUNTER_SETS_NETDEV 64
#define MLX5_MAX_NUMBER_OF_VFS 128

enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
};

enum {
	CMD_OWNER_SW = 0x0,
	CMD_OWNER_HW = 0x1,
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI = 0,
	MLX5_SQP_GSI = 1,
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SNIFFER = 3,
	MLX5_SQP_SYNC_UMR = 4,
};

enum {
	MLX5_MAX_PORTS = 2,
};

enum {
	MLX5_EQ_VEC_PAGES = 0,
	MLX5_EQ_VEC_CMD = 1,
	MLX5_EQ_VEC_ASYNC = 2,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME = 32
};

enum {
	MLX5_ATOMIC_MODE_OFF = 16,
	MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF,
	MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF,
};
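/*
 * Illustrative note (not part of the driver): the MLX5_ATOMIC_MODE_*
 * values above are pre-shifted field encodings, e.g. MLX5_ATOMIC_MODE_8B
 * is 3 << 16, so a mode can be OR-ed straight into the 32-bit context
 * word holding the field. The helper below is a hypothetical sketch of
 * that composition only.
 */
#if 0
static inline u32
mlx5_example_set_atomic_mode(u32 ctx_word)
{
	/* select 8-byte atomic operands (encoding 3 at bit 16) */
	return (ctx_word | MLX5_ATOMIC_MODE_8B);
}
#endif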
enum {
	MLX5_ATOMIC_MODE_DCT_OFF = 20,
	MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
	MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3,
};

enum {
	MLX5_REG_QPTS = 0x4002,
	MLX5_REG_QETCR = 0x4005,
	MLX5_REG_QPDP = 0x4007,
	MLX5_REG_QTCT = 0x400A,
	MLX5_REG_QPDPM = 0x4013,
	MLX5_REG_QHLL = 0x4016,
	MLX5_REG_QCAM = 0x4019,
	MLX5_REG_DCBX_PARAM = 0x4020,
	MLX5_REG_DCBX_APP = 0x4021,
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_FPGA_CAP = 0x4022,
	MLX5_REG_FPGA_CTRL = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_FPGA_SHELL_CNTR = 0x4025,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
	MLX5_REG_PAOS = 0x5006,
	MLX5_REG_PFCC = 0x5007,
	MLX5_REG_PPCNT = 0x5008,
	MLX5_REG_PMAOS = 0x5012,
	MLX5_REG_PUDE = 0x5009,
	MLX5_REG_PPTB = 0x500B,
	MLX5_REG_PBMC = 0x500C,
	MLX5_REG_PMPE = 0x5010,
	MLX5_REG_PELC = 0x500e,
	MLX5_REG_PVLC = 0x500f,
	MLX5_REG_PMLP = 0x5002,
	MLX5_REG_PCAM = 0x507f,
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MTMP = 0x900a,
	MLX5_REG_MCIA = 0x9014,
	MLX5_REG_MFRL = 0x9028,
	MLX5_REG_MPCNT = 0x9051,
	MLX5_REG_MCQI = 0x9061,
	MLX5_REG_MCC = 0x9062,
	MLX5_REG_MCDA = 0x9063,
	MLX5_REG_MCAM = 0x907f,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
	MLX5_INTERFACE_NUMBER = 2,
};

struct mlx5_field_desc {
	struct dentry *dent;
	int i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
	struct mlx5_field_desc fields[0];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
	MLX5_DEV_EVENT_VPORT_CHANGE,
	MLX5_DEV_EVENT_ERROR_STATE_DCBX,
	MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE,
	MLX5_DEV_EVENT_LOCAL_OPER_CHANGE,
	MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE,
};

enum mlx5_port_status {
	MLX5_PORT_UP = 1 << 0,
	MLX5_PORT_DOWN = 1 << 1,
};

enum {
	MLX5_VSC_SPACE_SUPPORTED = 0x1,
	MLX5_VSC_SPACE_OFFSET = 0x4,
	MLX5_VSC_COUNTER_OFFSET = 0x8,
	MLX5_VSC_SEMA_OFFSET = 0xC,
	MLX5_VSC_ADDR_OFFSET = 0x10,
	MLX5_VSC_DATA_OFFSET = 0x14,
	MLX5_VSC_MAX_RETRIES = 0x1000,
};

#define MLX5_PROT_MASK(link_mode) (1 << link_mode)

struct mlx5_uuar_info {
	struct mlx5_uar *uars;
	int num_uars;
	int num_low_latency_uuars;
	unsigned long *bitmap;
	unsigned int *count;
	struct mlx5_bf *bfs;

	/*
	 * protect uuar allocation data structs
	 */
	struct mutex lock;
	u32 ver;
};

struct mlx5_bf {
	void __iomem *reg;
	void __iomem *regreg;
	int buf_size;
	struct mlx5_uar *uar;
	unsigned long offset;
	int need_lock;
	/* protect blue flame buffer selection when needed
	 */
	spinlock_t lock;

	/* serialize 64 bit writes when done as two 32 bit accesses
	 */
	spinlock_t lock32;
	int uuarn;
};
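/*
 * Illustrative sketch (hypothetical, not part of the driver): on
 * targets that cannot issue a 64-bit doorbell store atomically, the
 * blue flame write is split into two 32-bit stores serialized by
 * bf->lock32, which is the pattern the comment above refers to (cf.
 * mlx5_write64() in doorbell.h).
 */
#if 0
static inline void
mlx5_example_bf_write64(struct mlx5_bf *bf, __be32 db[2])
{
	unsigned long flags;

	spin_lock_irqsave(&bf->lock32, flags);
	__raw_writel((__force u32)db[0], bf->reg);
	__raw_writel((__force u32)db[1], bf->reg + 4);
	spin_unlock_irqrestore(&bf->lock32, flags);
}
#endif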
struct mlx5_cmd_first {
	__be32 data[4];
};

struct cache_ent;
struct mlx5_fw_page {
	union {
		struct rb_node rb_node;
		struct list_head list;
	};
	struct mlx5_cmd_first first;
	struct mlx5_core_dev *dev;
	bus_dmamap_t dma_map;
	bus_addr_t dma_addr;
	void *virt_addr;
	struct cache_ent *cache;
	u32 numpages;
	u16 load_done;
#define MLX5_LOAD_ST_NONE 0
#define MLX5_LOAD_ST_SUCCESS 1
#define MLX5_LOAD_ST_FAILURE 2
	u16 func_id;
};
#define mlx5_cmd_msg mlx5_fw_page

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	struct dentry *dbg_in;
	struct dentry *dbg_out;
	struct dentry *dbg_outlen;
	struct dentry *dbg_status;
	struct dentry *dbg_run;
	void *in_msg;
	void *out_msg;
	u8 status;
	u16 inlen;
	u16 outlen;
};

struct cache_ent {
	/* protect block chain allocations
	 */
	spinlock_t lock;
	struct list_head head;
};

struct cmd_msg_cache {
	struct cache_ent large;
	struct cache_ent med;
};

struct mlx5_traffic_counter {
	u64 packets;
	u64 octets;
};

enum mlx5_cmd_mode {
	MLX5_CMD_MODE_POLLING,
	MLX5_CMD_MODE_EVENTS
};

struct mlx5_cmd_stats {
	u64 sum;
	u64 n;
	struct dentry *root;
	struct dentry *avg;
	struct dentry *count;
	/* protect command average calculations */
	spinlock_t lock;
};

struct mlx5_cmd {
	struct mlx5_fw_page *cmd_page;
	bus_dma_tag_t dma_tag;
	struct sx dma_sx;
	struct mtx dma_mtx;
#define MLX5_DMA_OWNED(dev) mtx_owned(&(dev)->cmd.dma_mtx)
#define MLX5_DMA_LOCK(dev) mtx_lock(&(dev)->cmd.dma_mtx)
#define MLX5_DMA_UNLOCK(dev) mtx_unlock(&(dev)->cmd.dma_mtx)
	struct cv dma_cv;
#define MLX5_DMA_DONE(dev) cv_broadcast(&(dev)->cmd.dma_cv)
#define MLX5_DMA_WAIT(dev) cv_wait(&(dev)->cmd.dma_cv, &(dev)->cmd.dma_mtx)
	void *cmd_buf;
	dma_addr_t dma;
	u16 cmdif_rev;
	u8 log_sz;
	u8 log_stride;
	int max_reg_cmds;
	int events;
	u32 __iomem *vector;

	/* protect command queue allocations
	 */
	spinlock_t alloc_lock;

	/* protect token allocations
	 */
	spinlock_t token_lock;
	u8 token;
	unsigned long bitmask;
	struct semaphore sem;
	struct semaphore pages_sem;
	enum mlx5_cmd_mode mode;
	struct mlx5_cmd_work_ent * volatile ent_arr[MLX5_MAX_COMMANDS];
	volatile enum mlx5_cmd_mode ent_mode[MLX5_MAX_COMMANDS];
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
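/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the MLX5_DMA_* macros above pair the dma_mtx mutex with the dma_cv
 * condition variable. A waiter holds the mutex, sleeps until the DMA
 * callback signals completion via MLX5_DMA_DONE(), and rechecks its
 * predicate after every wakeup.
 */
#if 0
static void
mlx5_example_wait_dma_done(struct mlx5_core_dev *dev,
    const volatile int *done)
{
	MLX5_DMA_LOCK(dev);
	while (*done == 0)
		MLX5_DMA_WAIT(dev);	/* sleeps on dma_cv, drops dma_mtx */
	MLX5_DMA_UNLOCK(dev);
}
#endif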
struct mlx5_port_caps {
	int gid_table_len;
	int pkey_table_len;
	u8 ext_port_cap;
};

struct mlx5_buf {
	bus_dma_tag_t dma_tag;
	bus_dmamap_t dma_map;
	struct mlx5_core_dev *dev;
	struct {
		void *buf;
	} direct;
	u64 *page_list;
	int npages;
	int size;
	u8 page_shift;
	u8 load_done;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list *frags;
	int npages;
	int size;
	u8 page_shift;
};

struct mlx5_eq {
	struct mlx5_core_dev *dev;
	__be32 __iomem *doorbell;
	u32 cons_index;
	struct mlx5_buf buf;
	int size;
	u8 irqn;
	u8 eqn;
	int nent;
	u64 mask;
	struct list_head list;
	int index;
	struct mlx5_rsc_debug *dbg;
};

struct mlx5_core_psv {
	u32 psv_idx;
	struct psv_layout {
		u32 pd;
		u16 syndrome;
		u16 reserved;
		u16 bg;
		u16 app_tag;
		u32 ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv psv_memory;
	struct mlx5_core_psv psv_wire;
#if (__FreeBSD_version >= 1100000)
	struct ib_sig_err err_item;
#endif
	bool sig_status_checked;
	bool sig_err_exists;
	u32 sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_MR_USER,
};

struct mlx5_core_mkey {
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
	u32 type;
};

struct mlx5_core_mr {
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
};

enum mlx5_res_type {
	MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ = 3,
	MLX5_RES_XSRQ = 4,
	MLX5_RES_DCT = 5,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
	atomic_t refcount;
	struct completion free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common common; /* must be first */
	u32 srqn;
	int max;
	size_t max_gs;
	size_t max_avail_gather;
	int wqe_shift;
	void (*event)(struct mlx5_core_srq *, int);
	atomic_t refcount;
	struct completion free;
};

struct mlx5_eq_table {
	void __iomem *update_ci;
	void __iomem *update_arm_ci;
	struct list_head comp_eqs_list;
	struct mlx5_eq pages_eq;
	struct mlx5_eq async_eq;
	struct mlx5_eq cmd_eq;
	int num_comp_vectors;
	/* protect EQs list
	 */
	spinlock_t lock;
};

struct mlx5_uar {
	u32 index;
	void __iomem *bf_map;
	void __iomem *map;
};

struct mlx5_core_health {
	struct mlx5_health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;
	u32 prev;
	int miss_counter;
	u32 fatal_error;
	struct workqueue_struct *wq_watchdog;
	struct work_struct work_watchdog;
	/* wq spinlock to synchronize draining */
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct work_struct work;
	struct delayed_work recover_work;
	unsigned int last_reset_req;
	struct work_struct work_cmd_completion;
	struct workqueue_struct *wq_cmd;
};

#ifdef RATELIMIT
#define MLX5_CQ_LINEAR_ARRAY_SIZE (128 * 1024)
#else
#define MLX5_CQ_LINEAR_ARRAY_SIZE 1024
#endif

struct mlx5_cq_linear_array_entry {
	spinlock_t lock;
	struct mlx5_core_cq * volatile cq;
};

struct mlx5_cq_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE];
};

struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_mr_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_irq_info {
	char name[MLX5_MAX_IRQ_NAME];
};

#ifdef RATELIMIT
struct mlx5_rl_entry {
	u32 rate;
	u16 burst;
	u16 index;
	u32 refcount;
};

struct mlx5_rl_table {
	struct mutex rl_lock;
	u16 max_size;
	u32 max_rate;
	u32 min_rate;
	struct mlx5_rl_entry *rl_entry;
};
#endif
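/*
 * Illustrative sketch (hypothetical, requires "options RATELIMIT"):
 * rate-limit entries are reference counted by (rate, burst), and
 * mlx5_rl_add_rate() hands back the hardware rate-table index to
 * program into a send queue; see the prototypes near the end of this
 * header. Error style follows the driver's Linux-like negative errnos.
 */
#if 0
static int
mlx5_example_hold_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst,
    u16 *index)
{
	if (!mlx5_rl_is_in_range(dev, rate, burst))
		return (-ERANGE);
	/* takes a reference; drop it later with mlx5_rl_remove_rate() */
	return (mlx5_rl_add_rate(dev, rate, burst, index));
}
#endif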
struct mlx5_priv {
	char name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table eq_table;
	struct msix_entry *msix_arr;
	struct mlx5_irq_info *irq_info;
	struct mlx5_uuar_info uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
	int disable_irqs;

	struct io_mapping *bf_mapping;

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root page_root;
	s64 fw_pages;
	atomic_t reg_pages;
	s64 pages_per_func[MLX5_MAX_NUMBER_OF_VFS];
	struct mlx5_core_health health;

	struct mlx5_srq_table srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table cq_table;
	/* end: cq stuff */

	/* start: mr stuff */
	struct mlx5_mr_table mr_table;
	/* end: mr stuff */

	/* start: alloc stuff */
	int numa_node;

	struct mutex pgdir_mutex;
	struct list_head pgdir_list;
	/* end: alloc stuff */
	struct dentry *dbg_root;

	/* protect mkey key part */
	spinlock_t mkey_lock;
	u8 mkey_key;

	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;
	unsigned long pci_dev_data;
#ifdef RATELIMIT
	struct mlx5_rl_table rl_table;
#endif
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP,
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

struct mlx5_special_contexts {
	int resd_lkey;
};
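/*
 * Illustrative sketch (hypothetical, assumes the usual linuxkpi bit
 * helpers are available): intf_state is an unsigned long bit field
 * keyed by MLX5_INTERFACE_STATE_UP, and the state member tracks device
 * health, so a readiness check could look like this:
 */
#if 0
static inline bool
mlx5_example_device_ready(struct mlx5_core_dev *dev)
{
	return (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) &&
	    dev->state == MLX5_DEVICE_STATE_UP);
}
#endif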
struct mlx5_flow_root_namespace;
struct mlx5_core_dev {
	struct pci_dev *pdev;
	/* sync pci state */
	struct mutex pci_status_mutex;
	enum mlx5_pci_status pci_status;
	char board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd cmd;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	struct {
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
	} caps;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state state;
	/* sync interface state */
	struct mutex intf_state_mutex;
	unsigned long intf_state;
	void (*event) (struct mlx5_core_dev *dev,
	    enum mlx5_dev_event event,
	    unsigned long param);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;
	atomic_t num_qps;
	u32 vsc_addr;
	u32 issi;
	struct mlx5_special_contexts special_contexts;
	unsigned int module_status[MLX5_MAX_PORTS];
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_root_namespace *fdb_root_ns;
	struct mlx5_flow_root_namespace *esw_egress_root_ns;
	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
	struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
	struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
	u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
	const struct mlx5_crspace_regmap *dump_rege;
	uint32_t *dump_data;
	unsigned dump_size;
	bool dump_valid;
	bool dump_copyout;
	struct mtx dump_lock;

	struct sysctl_ctx_list sysctl_ctx;
	int msix_eqvec;
	int pwr_status;
	int pwr_value;

	struct {
		struct mlx5_rsvd_gids reserved_gids;
		atomic_t roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
};

enum {
	MLX5_WOL_DISABLE = 0,
	MLX5_WOL_SECURED_MAGIC = 1 << 1,
	MLX5_WOL_MAGIC = 1 << 2,
	MLX5_WOL_ARP = 1 << 3,
	MLX5_WOL_BROADCAST = 1 << 4,
	MLX5_WOL_MULTICAST = 1 << 5,
	MLX5_WOL_UNICAST = 1 << 6,
	MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};
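/*
 * Illustrative sketch (hypothetical, not part of the driver API):
 * assuming mlx5_is_wol_supported() reports a bitmask of the MLX5_WOL_*
 * modes above, enabling magic-packet wake-on-lan could look like this
 * (see the mlx5_set_wol()/mlx5_query_wol() prototypes later in this
 * header):
 */
#if 0
static int
mlx5_example_enable_wol_magic(struct mlx5_core_dev *dev)
{
	if (!(mlx5_is_wol_supported(dev) & MLX5_WOL_MAGIC))
		return (-EOPNOTSUPP);
	return (mlx5_set_wol(dev, MLX5_WOL_MAGIC));
}
#endif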
struct mlx5_db {
	__be32 *db;
	union {
		struct mlx5_db_pgdir *pgdir;
		struct mlx5_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
};

struct mlx5_net_counters {
	u64 packets;
	u64 octets;
};

struct mlx5_ptys_reg {
	u8 an_dis_admin;
	u8 an_dis_ap;
	u8 local_port;
	u8 proto_mask;
	u32 eth_proto_cap;
	u16 ib_link_width_cap;
	u16 ib_proto_cap;
	u32 eth_proto_admin;
	u16 ib_link_width_admin;
	u16 ib_proto_admin;
	u32 eth_proto_oper;
	u16 ib_link_width_oper;
	u16 ib_proto_oper;
	u32 eth_proto_lp_advertise;
};

struct mlx5_pvlc_reg {
	u8 local_port;
	u8 vl_hw_cap;
	u8 vl_admin;
	u8 vl_operational;
};

struct mlx5_pmtu_reg {
	u8 local_port;
	u16 max_mtu;
	u16 admin_mtu;
	u16 oper_mtu;
};

struct mlx5_vport_counters {
	struct mlx5_net_counters received_errors;
	struct mlx5_net_counters transmit_errors;
	struct mlx5_net_counters received_ib_unicast;
	struct mlx5_net_counters transmitted_ib_unicast;
	struct mlx5_net_counters received_ib_multicast;
	struct mlx5_net_counters transmitted_ib_multicast;
	struct mlx5_net_counters received_eth_broadcast;
	struct mlx5_net_counters transmitted_eth_broadcast;
	struct mlx5_net_counters received_eth_unicast;
	struct mlx5_net_counters transmitted_eth_unicast;
	struct mlx5_net_counters received_eth_multicast;
	struct mlx5_net_counters transmitted_eth_multicast;
};

enum {
	MLX5_DB_PER_PAGE = MLX5_ADAPTER_PAGE_SIZE / L1_CACHE_BYTES,
};

struct mlx5_core_dct {
	struct mlx5_core_rsc_common common; /* must be first */
	void (*event)(struct mlx5_core_dct *, int);
	int dctn;
	struct completion drained;
	struct mlx5_rsc_debug *dbg;
	int pid;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

struct mlx5_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	struct mlx5_fw_page *fw_page;
	__be32 *db_page;
	dma_addr_t db_dma;
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;
	int uin_size;
	void *uout;
	int uout_size;
	mlx5_cmd_cbk_t callback;
	struct delayed_work cb_timeout_work;
	void *context;
	int idx;
	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
	int ret;
	int page_queue;
	u8 status;
	u8 token;
	u64 ts1;
	u64 ts2;
	u16 op;
	u8 busy;
	bool polling;
};

struct mlx5_pas {
	u64 pa;
	u8 log_sz;
};

enum port_state_policy {
	MLX5_POLICY_DOWN = 0,
	MLX5_POLICY_UP = 1,
	MLX5_POLICY_FOLLOW = 2,
	MLX5_POLICY_INVALID = 0xffffffff
};

static inline void *
mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return ((char *)buf->direct.buf + offset);
}

extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		printf("mlx5_core: WARN: gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	return rtn;
}

static inline void *mlx5_vmalloc(unsigned long size)
{
	void *rtn;

	rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vmalloc(size);
	return rtn;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
    int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
    void *out, int out_size, mlx5_cmd_cbk_t callback,
    void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
    void *out, int out_size);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_trigger_health_watchdog(struct mlx5_core_dev *dev);

#define mlx5_buf_alloc_node(dev, size, direct, buf, node) \
	mlx5_buf_alloc(dev, size, direct, buf)
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
    struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
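/*
 * Illustrative sketch (hypothetical function, error handling trimmed):
 * a contiguous buffer round trip with the helpers declared above;
 * mlx5_buf_offset() returns a kernel virtual address into the
 * allocation.
 */
#if 0
static int
mlx5_example_buf_roundtrip(struct mlx5_core_dev *dev)
{
	struct mlx5_buf buf;
	void *data;
	int err;

	err = mlx5_buf_alloc(dev, 4096, 2 * PAGE_SIZE, &buf);
	if (err != 0)
		return (err);
	data = mlx5_buf_offset(&buf, 0);
	memset(data, 0, 4096);
	mlx5_buf_free(dev, &buf);
	return (0);
}
#endif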
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    struct mlx5_srq_attr *out);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    u16 lwm, int is_srq);
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
    struct mlx5_core_mr *mkey,
    u32 *in, int inlen,
    u32 *out, int outlen,
    mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
    struct mlx5_core_mr *mr,
    u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey,
    u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
    u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
    u16 opmod, u8 port);
void mlx5_fwp_flush(struct mlx5_fw_page *fwp);
void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp);
struct mlx5_fw_page *mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num);
void mlx5_fwp_free(struct mlx5_fw_page *fwp);
u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset);
void *mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
    s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vector, enum mlx5_cmd_mode mode);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
    int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
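/*
 * Illustrative sketch (hypothetical usage): firmware pages allocated
 * through the mlx5_fwp_*() helpers above expose both a DMA address for
 * the device and a kernel virtual address for the CPU.
 */
#if 0
static int
mlx5_example_fw_page(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_page *fwp;
	void *virt;
	u64 dma;

	fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (fwp == NULL)
		return (-ENOMEM);
	dma = mlx5_fwp_get_dma(fwp, 0);	/* would be handed to firmware */
	virt = mlx5_fwp_get_virt(fwp, 0);
	memset(virt, 0, MLX5_ADAPTER_PAGE_SIZE);
	mlx5_fwp_flush(fwp);	/* make CPU writes visible to the device */
	(void)dma;
	mlx5_fwp_free(fwp);
	return (0);
}
#endif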
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
    int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
    u64 addr);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
    int size_in, void *data_out, int size_out,
    u16 reg_num, int arg, int write);

void mlx5_toggle_port_link(struct mlx5_core_dev *dev);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
    int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
    int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev);
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode);
int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout);
int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout);
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode);
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
    struct mlx5_pvlc_reg *pvlc, int write);
int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
    struct mlx5_ptys_reg *ptys, int write);
int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
    struct mlx5_pmtu_reg *pmtu, int write);
int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port);
int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port);
int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
    int priority, int *is_enable);
int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
    int priority, int enable);
int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
    void *out, int out_size);
int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
    void *in, int in_size);
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
    void *out, int out_size);
int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
    int in_size);
int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
    u8 num_of_samples, u16 sample_index,
    void *out, int out_size);
int mlx5_vsc_find_cap(struct mlx5_core_dev *mdev);
int mlx5_vsc_lock(struct mlx5_core_dev *mdev);
void mlx5_vsc_unlock(struct mlx5_core_dev *mdev);
int mlx5_vsc_set_space(struct mlx5_core_dev *mdev, u16 space);
int mlx5_vsc_write(struct mlx5_core_dev *mdev, u32 addr, const u32 *data);
int mlx5_vsc_read(struct mlx5_core_dev *mdev, u32 addr, u32 *data);
int mlx5_vsc_lock_addr_space(struct mlx5_core_dev *mdev, u32 addr);
int mlx5_vsc_unlock_addr_space(struct mlx5_core_dev *mdev, u32 addr);
int mlx5_pci_read_power_status(struct mlx5_core_dev *mdev,
    u16 *p_power, u8 *p_status);
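/*
 * Illustrative sketch (hypothetical wrapper, error handling
 * abbreviated): the vendor-specific capability is driven through the
 * MLX5_VSC_* offsets defined near the top of this header, and an
 * access follows lock -> select space -> read/write -> unlock.
 */
#if 0
static int
mlx5_example_vsc_read_one(struct mlx5_core_dev *mdev, u16 space, u32 addr,
    u32 *data)
{
	int err;

	err = mlx5_vsc_lock(mdev);
	if (err != 0)
		return (err);
	err = mlx5_vsc_set_space(mdev, space);
	if (err == 0)
		err = mlx5_vsc_read(mdev, addr, data);
	mlx5_vsc_unlock(mdev);
	return (err);
}
#endif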
/*
 * An mkey is a 24-bit index in the upper bits plus an 8-bit variant
 * part in the low byte; the helpers below convert between the two
 * representations (see also mlx5_base_mkey() above).
 */
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}

enum {
	MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
	MAX_MR_CACHE_ENTRIES = 15,
};

struct mlx5_interface {
	void * (*add)(struct mlx5_core_dev *dev);
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	void (*event)(struct mlx5_core_dev *dev, void *context,
	    enum mlx5_dev_event event, unsigned long param);
	void * (*get_dev)(void *context);
	int protocol;
	struct list_head list;
};

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);

unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
    u8 roce_version, u8 roce_l3_type, const u8 *gid,
    const u8 *mac, bool vlan, u16 vlan_id);

struct mlx5_profile {
	u64 mask;
	u8 log_max_qp;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

#ifdef RATELIMIT
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst);
bool mlx5_rl_is_in_range(const struct mlx5_core_dev *dev, u32 rate, u32 burst);

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}
#endif

#endif /* MLX5_DRIVER_H */