/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_H
#define MLX4_H

#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <net/devlink.h>
#include <linux/rwsem.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>
#include "fw_qos.h"

#define DRV_NAME	"mlx4_core"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb, 2014"

#define MLX4_FS_UDP_UC_EN		(1 << 1)
#define MLX4_FS_TCP_UC_EN		(1 << 2)
#define MLX4_FS_NUM_OF_L2_ADDR		8
#define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
#define MLX4_FS_NUM_MCG			(1 << 17)

#define INIT_HCA_TPT_MW_ENABLE		(1 << 7)

#define MLX4_QUERY_IF_STAT_RESET	BIT(31)

enum {
	MLX4_HCR_BASE		= 0x80680,
	MLX4_HCR_SIZE		= 0x0001c,
	MLX4_CLR_INT_SIZE	= 0x00008,
	MLX4_SLAVE_COMM_BASE	= 0x0,
	MLX4_COMM_PAGESIZE	= 0x1000,
	MLX4_CLOCK_SIZE		= 0x00008,
	MLX4_COMM_CHAN_CAPS	= 0x8,
	MLX4_COMM_CHAN_FLAGS	= 0xc
};

enum {
	MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
	MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
	MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
	MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
	MLX4_MTT_ENTRY_PER_SEG = 8,
};
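
/*
 * Worked example (illustrative): an MGM entry of 2^MLX4_MAX_MGM_LOG_ENTRY_SIZE
 * bytes can be viewed as 16-byte lines.  Reading the layout of struct mlx4_mgm
 * later in this file, the first two lines hold the header (next_gid_index,
 * members_count, reserved words and the 16-byte GID), and each remaining line
 * holds four __be32 QPNs.  For the maximum log entry size of 12 this gives
 *
 *	4 * ((1 << 12) / 16 - 2) = 4 * (256 - 2) = 1016 == MLX4_MAX_QP_PER_MGM
 */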

enum {
	MLX4_NUM_PDS = 1 << 15
};

enum {
	MLX4_CMPT_TYPE_QP = 0,
	MLX4_CMPT_TYPE_SRQ = 1,
	MLX4_CMPT_TYPE_CQ = 2,
	MLX4_CMPT_TYPE_EQ = 3,
	MLX4_CMPT_NUM_TYPE
};

enum {
	MLX4_CMPT_SHIFT = 24,
	MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};

enum mlx4_mpt_state {
	MLX4_MPT_DISABLED = 0,
	MLX4_MPT_EN_HW,
	MLX4_MPT_EN_SW
};

#define MLX4_COMM_TIME			10000
#define MLX4_COMM_OFFLINE_TIME_OUT	30000
#define MLX4_COMM_CMD_NA_OP		0x0


enum {
	MLX4_COMM_CMD_RESET,
	MLX4_COMM_CMD_VHCR0,
	MLX4_COMM_CMD_VHCR1,
	MLX4_COMM_CMD_VHCR2,
	MLX4_COMM_CMD_VHCR_EN,
	MLX4_COMM_CMD_VHCR_POST,
	MLX4_COMM_CMD_FLR = 254
};

enum {
	MLX4_VF_SMI_DISABLED,
	MLX4_VF_SMI_ENABLED
};

/* The flag indicates that the slave should delay the RESET cmd */
#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
/* indicates how many retries will be done if we are in the middle of FLR */
#define NUM_OF_RESET_RETRIES	10
#define SLEEP_TIME_IN_RESET	(2 * 1000)
enum mlx4_resource {
	RES_QP,
	RES_CQ,
	RES_SRQ,
	RES_XRCD,
	RES_MPT,
	RES_MTT,
	RES_MAC,
	RES_VLAN,
	RES_EQ,
	RES_COUNTER,
	RES_FS_RULE,
	MLX4_NUM_OF_RESOURCE_TYPE
};

enum mlx4_alloc_mode {
	RES_OP_RESERVE,
	RES_OP_RESERVE_AND_MAP,
	RES_OP_MAP_ICM,
};

enum mlx4_res_tracker_free_type {
	RES_TR_FREE_ALL,
	RES_TR_FREE_SLAVES_ONLY,
	RES_TR_FREE_STRUCTS_ONLY,
};

/*
 * Virtual HCR structures.
 * mlx4_vhcr is the SW representation, in machine endianness.
 *
 * mlx4_vhcr_cmd is the formalized structure, the one that is passed
 * to the FW to go through the communication channel.
 * It is big endian, and has the same structure as the physical HCR
 * used by the command interface.
 */
struct mlx4_vhcr {
	u64 in_param;
	u64 out_param;
	u32 in_modifier;
	u32 errno;
	u16 op;
	u16 token;
	u8 op_modifier;
	u8 e_bit;
};

struct mlx4_vhcr_cmd {
	__be64 in_param;
	__be32 in_modifier;
	u32 reserved1;
	__be64 out_param;
	__be16 token;
	u16 reserved;
	u8 status;
	u8 flags;
	__be16 opcode;
};
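
/*
 * Illustrative sketch (not an excerpt from the command-channel code): a slave
 * filling the wire-format mlx4_vhcr_cmd from its CPU-endian mlx4_vhcr would
 * convert each field explicitly, e.g.:
 *
 *	vhcr_cmd->in_param    = cpu_to_be64(vhcr->in_param);
 *	vhcr_cmd->out_param   = cpu_to_be64(vhcr->out_param);
 *	vhcr_cmd->in_modifier = cpu_to_be32(vhcr->in_modifier);
 *	vhcr_cmd->token       = cpu_to_be16(vhcr->token);
 *
 * The 16-bit opcode field additionally carries the op_modifier packed into
 * its upper bits together with the opcode.
 */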

struct mlx4_cmd_info {
	u16 opcode;
	bool has_inbox;
	bool has_outbox;
	bool out_is_imm;
	bool encode_slave_id;
	int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		      struct mlx4_cmd_mailbox *inbox);
	int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		       struct mlx4_cmd_mailbox *inbox,
		       struct mlx4_cmd_mailbox *outbox,
		       struct mlx4_cmd_info *cmd);
};

#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
#define mlx4_debug_level	(0)
#endif /* CONFIG_MLX4_DEBUG */

#define mlx4_dbg(mdev, format, ...)					\
do {									\
	if (mlx4_debug_level)						\
		dev_printk(KERN_DEBUG,					\
			   &(mdev)->persist->pdev->dev, format,		\
			   ##__VA_ARGS__);				\
} while (0)

#define mlx4_err(mdev, format, ...)					\
	dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...)					\
	dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...)					\
	dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
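
/*
 * These wrappers take the mlx4_dev rather than a struct device, so a typical
 * (illustrative) call site looks like:
 *
 *	mlx4_warn(dev, "port %d: unexpected link state %d\n", port, state);
 *
 * When CONFIG_MLX4_DEBUG is not set, mlx4_debug_level is the constant 0 and
 * the compiler drops the mlx4_dbg() body entirely.
 */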

extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
extern int mlx4_internal_err_reset;

#define MLX4_MAX_NUM_SLAVES	(min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
				     MLX4_MFUNC_MAX))
#define ALL_SLAVES	0xff

struct mlx4_bitmap {
	u32 last;
	u32 top;
	u32 max;
	u32 reserved_top;
	u32 mask;
	u32 avail;
	u32 effective_len;
	spinlock_t lock;
	unsigned long *table;
};

struct mlx4_buddy {
	unsigned long **bits;
	unsigned int *num_free;
	u32 max_order;
	spinlock_t lock;
};

struct mlx4_icm;

struct mlx4_icm_table {
	u64 virt;
	int num_icm;
	u32 num_obj;
	int obj_size;
	int lowmem;
	int coherent;
	struct mutex mutex;
	struct mlx4_icm **icm;
};

#define MLX4_MPT_FLAG_SW_OWNS		(0xfUL << 28)
#define MLX4_MPT_FLAG_FREE		(0x3UL << 28)
#define MLX4_MPT_FLAG_MIO		(1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE	(1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL		(1 << 9)
#define MLX4_MPT_FLAG_REGION		(1 << 8)

#define MLX4_MPT_PD_MASK		(0x1FFFFUL)
#define MLX4_MPT_PD_VF_MASK		(0xFE0000UL)
#define MLX4_MPT_PD_FLAG_FAST_REG	(1 << 27)
#define MLX4_MPT_PD_FLAG_RAE		(1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV		(3 << 24)

#define MLX4_MPT_QP_FLAG_BOUND_QP	(1 << 7)

#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00

#define MLX4_CQE_SIZE_MASK_STRIDE	0x3
#define MLX4_EQE_SIZE_MASK_STRIDE	0x30

#define MLX4_EQ_ASYNC			0
#define MLX4_EQ_TO_CQ_VECTOR(vector)	((vector) - \
					 !!((int)(vector) >= MLX4_EQ_ASYNC))
#define MLX4_CQ_TO_EQ_VECTOR(vector)	((vector) + \
					 !!((int)(vector) >= MLX4_EQ_ASYNC))
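
/*
 * Worked example (illustrative): MLX4_EQ_ASYNC is EQ 0, and completion
 * vectors are numbered without it.  For any non-negative vector the
 * correction term !!((int)(vector) >= MLX4_EQ_ASYNC) evaluates to 1, so
 *
 *	MLX4_CQ_TO_EQ_VECTOR(0) == 1	(completion vector 0 -> EQ 1)
 *	MLX4_EQ_TO_CQ_VECTOR(1) == 0	(EQ 1 -> completion vector 0)
 */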

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mlx4_mpt_entry {
	__be32 flags;
	__be32 qpn;
	__be32 key;
	__be32 pd_flags;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 win_cnt;
	u8 reserved1[3];
	u8 mtt_rep;
	__be64 mtt_addr;
	__be32 mtt_sz;
	__be32 entity_size;
	__be32 first_byte_offset;
} __packed;

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32 flags;
	u16 reserved1[3];
	__be16 page_offset;
	u8 log_eq_size;
	u8 reserved2[4];
	u8 eq_period;
	u8 reserved3;
	u8 eq_max_count;
	u8 reserved4[3];
	u8 intr;
	u8 log_page_size;
	u8 reserved5[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	u32 reserved6[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32 reserved7[4];
};

struct mlx4_cq_context {
	__be32 flags;
	u16 reserved1[3];
	__be16 page_offset;
	__be32 logsize_usrpage;
	__be16 cq_period;
	__be16 cq_max_count;
	u8 reserved2[3];
	u8 comp_eqn;
	u8 log_page_size;
	u8 reserved3[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	u32 reserved4[2];
	__be64 db_rec_addr;
};

struct mlx4_srq_context {
	__be32 state_logsize_srqn;
	u8 logstride;
	u8 reserved1;
	__be16 xrcd;
	__be32 pg_offset_cqn;
	u32 reserved2;
	u8 log_page_size;
	u8 reserved3[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	__be32 pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16 reserved4;
	__be16 wqe_counter;
	u32 reserved5;
	__be64 db_rec_addr;
};

struct mlx4_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
	/* lock on completion tasklet list */
	spinlock_t lock;
};

struct mlx4_eq {
	struct mlx4_dev *dev;
	void __iomem *doorbell;
	int eqn;
	u32 cons_index;
	u16 irq;
	u16 have_irq;
	int nent;
	struct mlx4_buf_list *page_list;
	struct mlx4_mtt mtt;
	struct mlx4_eq_tasklet tasklet_ctx;
	struct mlx4_active_ports actv_ports;
	u32 ref_count;
	cpumask_var_t affinity_mask;
};

struct mlx4_slave_eqe {
	u8 type;
	u8 port;
	u32 param;
};

struct mlx4_slave_event_eq_info {
	int eqn;
	u16 token;
};

struct mlx4_profile {
	int num_qp;
	int rdmarc_per_qp;
	int num_srq;
	int num_cq;
	int num_mcg;
	int num_mpt;
	unsigned num_mtt;
};

struct mlx4_fw {
	u64 clr_int_base;
	u64 catas_offset;
	u64 comm_base;
	u64 clock_offset;
	struct mlx4_icm *fw_icm;
	struct mlx4_icm *aux_icm;
	u32 catas_size;
	u16 fw_pages;
	u8 clr_int_bar;
	u8 catas_bar;
	u8 comm_bar;
	u8 clock_bar;
};

struct mlx4_comm {
	u32 slave_write;
	u32 slave_read;
};

enum {
	MLX4_MCAST_CONFIG = 0,
	MLX4_MCAST_DISABLE = 1,
	MLX4_MCAST_ENABLE = 2,
};

#define VLAN_FLTR_SIZE	128

struct mlx4_vlan_fltr {
	__be32 entry[VLAN_FLTR_SIZE];
};

struct mlx4_mcast_entry {
	struct list_head list;
	u64 addr;
};

struct mlx4_promisc_qp {
	struct list_head list;
	u32 qpn;
};

struct mlx4_steer_index {
	struct list_head list;
	unsigned int index;
	struct list_head duplicates;
};

#define MLX4_EVENT_TYPES_NUM	64

struct mlx4_slave_state {
	u8 comm_toggle;
	u8 last_cmd;
	u8 init_port_mask;
	bool active;
	bool old_vlan_api;
	bool vst_qinq_supported;
	u8 function;
	dma_addr_t vhcr_dma;
	u16 mtu[MLX4_MAX_PORTS + 1];
	__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
	struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
	struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
	struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
	/* event type to eq number lookup */
	struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
	u16 eq_pi;
	u16 eq_ci;
	spinlock_t lock;
	/* initialized via kzalloc */
	u8 is_slave_going_down;
	u32 cookie;
	enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
};

#define MLX4_VGT	4095
#define NO_INDX		(-1)

struct mlx4_vport_state {
	u64 mac;
	u16 default_vlan;
	u8 default_qos;
	__be16 vlan_proto;
	u32 tx_rate;
	bool spoofchk;
	u32 link_state;
	u8 qos_vport;
	__be64 guid;
};

struct mlx4_vf_admin_state {
	struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
	u8 enable_smi[MLX4_MAX_PORTS + 1];
};

struct mlx4_vport_oper_state {
	struct mlx4_vport_state state;
	int mac_idx;
	int vlan_idx;
};

struct mlx4_vf_oper_state {
	struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
	u8 smi_enabled[MLX4_MAX_PORTS + 1];
};

struct slave_list {
	struct mutex mutex;
	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
};

struct resource_allocator {
	spinlock_t alloc_lock;	/* protect quotas */
	union {
		int res_reserved;
		int res_port_rsvd[MLX4_MAX_PORTS];
	};
	union {
		int res_free;
		int res_port_free[MLX4_MAX_PORTS];
	};
	int *quota;
	int *allocated;
	int *guaranteed;
};

struct mlx4_resource_tracker {
	spinlock_t lock;
	/* one tree per resource type */
	struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
	/* slave resource lists, one per slave */
	struct slave_list *slave_list;
	struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
};

#define SLAVE_EVENT_EQ_SIZE	128
struct mlx4_slave_event_eq {
	u32 eqn;
	u32 cons;
	u32 prod;
	spinlock_t event_lock;
	struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
};

struct mlx4_qos_manager {
	int num_of_qos_vfs;
	DECLARE_BITMAP(priority_bm, MLX4_NUM_UP);
};

struct mlx4_master_qp0_state {
	int proxy_qp0_active;
	int qp0_active;
	int port_active;
};

struct mlx4_mfunc_master_ctx {
	struct mlx4_slave_state *slave_state;
	struct mlx4_vf_admin_state *vf_admin;
	struct mlx4_vf_oper_state *vf_oper;
	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
	int init_port_ref[MLX4_MAX_PORTS + 1];
	u16 max_mtu[MLX4_MAX_PORTS + 1];
	u8 pptx;
	u8 pprx;
	int disable_mcast_ref[MLX4_MAX_PORTS + 1];
	struct mlx4_resource_tracker res_tracker;
	struct workqueue_struct *comm_wq;
	struct work_struct comm_work;
	struct work_struct slave_event_work;
	struct work_struct slave_flr_event_work;
	spinlock_t slave_state_lock;
	__be32 comm_arm_bit_vector[4];
	struct mlx4_eqe cmd_eqe;
	struct mlx4_slave_event_eq slave_eq;
	struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
	struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
};

struct mlx4_mfunc {
	struct mlx4_comm __iomem *comm;
	struct mlx4_vhcr_cmd *vhcr;
	dma_addr_t vhcr_dma;

	struct mlx4_mfunc_master_ctx master;
};

#define MGM_QPN_MASK	0x00FFFFFF
#define MGM_BLCK_LB_BIT	30

struct mlx4_mgm {
	__be32 next_gid_index;
	__be32 members_count;
	u32 reserved[2];
	u8 gid[16];
	__be32 qp[MLX4_MAX_QP_PER_MGM];
};

struct mlx4_cmd {
	struct pci_pool *pool;
	void __iomem *hcr;
	struct mutex slave_cmd_mutex;
	struct semaphore poll_sem;
	struct semaphore event_sem;
	struct rw_semaphore switch_sem;
	int max_cmds;
	spinlock_t context_lock;
	int free_head;
	struct mlx4_cmd_context *context;
	u16 token_mask;
	u8 use_events;
	u8 toggle;
	u8 comm_toggle;
	u8 initialized;
};

enum {
	MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0,
	MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1,
	MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2,
};
struct mlx4_vf_immed_vlan_work {
	struct work_struct work;
	struct mlx4_priv *priv;
	int flags;
	int slave;
	int vlan_ix;
	int orig_vlan_ix;
	u8 port;
	u8 qos;
	u8 qos_vport;
	u16 vlan_id;
	u16 orig_vlan_id;
	__be16 vlan_proto;
};


struct mlx4_uar_table {
	struct mlx4_bitmap bitmap;
};

struct mlx4_mr_table {
	struct mlx4_bitmap mpt_bitmap;
	struct mlx4_buddy mtt_buddy;
	u64 mtt_base;
	u64 mpt_base;
	struct mlx4_icm_table mtt_table;
	struct mlx4_icm_table dmpt_table;
};

struct mlx4_cq_table {
	struct mlx4_bitmap bitmap;
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
};

struct mlx4_eq_table {
	struct mlx4_bitmap bitmap;
	char *irq_names;
	void __iomem *clr_int;
	void __iomem **uar_map;
	u32 clr_mask;
	struct mlx4_eq *eq;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
	int have_irq;
	u8 inta_pin;
};

struct mlx4_srq_table {
	struct mlx4_bitmap bitmap;
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
};

enum mlx4_qp_table_zones {
	MLX4_QP_TABLE_ZONE_GENERAL,
	MLX4_QP_TABLE_ZONE_RSS,
	MLX4_QP_TABLE_ZONE_RAW_ETH,
	MLX4_QP_TABLE_ZONE_NUM
};

struct mlx4_qp_table {
	struct mlx4_bitmap *bitmap_gen;
	struct mlx4_zone_allocator *zones;
	u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM];
	u32 rdmarc_base;
	int rdmarc_shift;
	spinlock_t lock;
	struct mlx4_icm_table qp_table;
	struct mlx4_icm_table auxc_table;
	struct mlx4_icm_table altc_table;
	struct mlx4_icm_table rdmarc_table;
	struct mlx4_icm_table cmpt_table;
};

struct mlx4_mcg_table {
	struct mutex mutex;
	struct mlx4_bitmap bitmap;
	struct mlx4_icm_table table;
};

struct mlx4_catas_err {
	u32 __iomem *map;
	struct timer_list timer;
	struct list_head list;
};

#define MLX4_MAX_MAC_NUM	128
#define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)

struct mlx4_mac_table {
	__be64 entries[MLX4_MAX_MAC_NUM];
	int refs[MLX4_MAX_MAC_NUM];
	bool is_dup[MLX4_MAX_MAC_NUM];
	struct mutex mutex;
	int total;
	int max;
};

#define MLX4_ROCE_GID_ENTRY_SIZE	16

struct mlx4_roce_gid_entry {
	u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
};

struct mlx4_roce_gid_table {
	struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
	struct mutex mutex;
};

#define MLX4_MAX_VLAN_NUM	128
#define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)

struct mlx4_vlan_table {
	__be32 entries[MLX4_MAX_VLAN_NUM];
	int refs[MLX4_MAX_VLAN_NUM];
	int is_dup[MLX4_MAX_VLAN_NUM];
	struct mutex mutex;
	int total;
	int max;
};

#define SET_PORT_GEN_ALL_VALID		0x7
#define SET_PORT_PROMISC_SHIFT		31
#define SET_PORT_MC_PROMISC_SHIFT	30

enum {
	MCAST_DIRECT_ONLY = 0,
	MCAST_DIRECT = 1,
	MCAST_DEFAULT = 2
};


struct mlx4_set_port_general_context {
	u16 reserved1;
	u8 v_ignore_fcs;
	u8 flags;
	union {
		u8 ignore_fcs;
		u8 roce_mode;
	};
	u8 reserved2;
	__be16 mtu;
	u8 pptx;
	u8 pfctx;
	u16 reserved3;
	u8 pprx;
	u8 pfcrx;
	u16 reserved4;
	u32 reserved5;
	u8 phv_en;
	u8 reserved6[3];
};

struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;
	u8 reserved1;
	u8 n_mac;
	u8 n_vlan;
	u8 n_prio;
	u8 reserved2[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;
	u8 intra_vlan_miss;
	u8 vlan_miss;
	u8 reserved3[3];
	u8 no_vlan_prio;
	__be32 promisc;
	__be32 mcast;
};

struct mlx4_port_info {
	struct mlx4_dev *dev;
	int port;
	char dev_name[16];
	struct device_attribute port_attr;
	enum mlx4_port_type tmp_type;
	char dev_mtu_name[16];
	struct device_attribute port_mtu_attr;
	struct mlx4_mac_table mac_table;
	struct mlx4_vlan_table vlan_table;
	struct mlx4_roce_gid_table gid_table;
	int base_qpn;
	struct cpu_rmap *rmap;
	struct devlink_port devlink_port;
};

struct mlx4_sense {
	struct mlx4_dev *dev;
	u8 do_sense_port[MLX4_MAX_PORTS + 1];
	u8 sense_allowed[MLX4_MAX_PORTS + 1];
	struct delayed_work sense_poll;
};

struct mlx4_msix_ctl {
	DECLARE_BITMAP(pool_bm, MAX_MSIX);
	struct mutex pool_lock;
};

struct mlx4_steer {
	struct list_head promisc_qps[MLX4_NUM_STEERS];
	struct list_head steer_entries[MLX4_NUM_STEERS];
};

enum {
	MLX4_PCI_DEV_IS_VF = 1 << 0,
	MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
};

enum {
	MLX4_NO_RR = 0,
	MLX4_USE_RR = 1,
};

struct mlx4_priv {
	struct mlx4_dev dev;

	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;

	int pci_dev_data;
	int removed;

	struct list_head pgdir_list;
	struct mutex pgdir_mutex;

	struct mlx4_fw fw;
	struct mlx4_cmd cmd;
	struct mlx4_mfunc mfunc;

	struct mlx4_bitmap pd_bitmap;
	struct mlx4_bitmap xrcd_bitmap;
	struct mlx4_uar_table uar_table;
	struct mlx4_mr_table mr_table;
	struct mlx4_cq_table cq_table;
	struct mlx4_eq_table eq_table;
	struct mlx4_srq_table srq_table;
	struct mlx4_qp_table qp_table;
	struct mlx4_mcg_table mcg_table;
	struct mlx4_bitmap counters_bitmap;
	int def_counter[MLX4_MAX_PORTS];

	struct mlx4_catas_err catas_err;

	void __iomem *clr_base;

	struct mlx4_uar driver_uar;
	void __iomem *kar;
	struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
	struct mlx4_sense sense;
	struct mutex port_mutex;
	struct mlx4_msix_ctl msix_ctl;
	struct mlx4_steer *steer;
	struct list_head bf_list;
	struct mutex bf_mutex;
	struct io_mapping *bf_mapping;
	void __iomem *clock_mapping;
	int reserved_mtts;
	int fs_hash_mode;
	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct mlx4_port_map v2p;	/* cached port mapping configuration */
	struct mutex bond_mutex;	/* for bond mode */
	__be64 slave_node_guids[MLX4_MFUNC_MAX];

	atomic_t opreq_count;
	struct work_struct opreq_task;
};

static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
{
	return container_of(dev, struct mlx4_priv, dev);
}

#define MLX4_SENSE_RANGE	(HZ * 3)

extern struct workqueue_struct *mlx4_wq;

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
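
/*
 * Illustrative sketch of the bitmap allocator API above (error handling
 * shortened; the sizes are made up):
 *
 *	struct mlx4_bitmap bitmap;
 *	u32 obj;
 *
 *	if (mlx4_bitmap_init(&bitmap, 1 << 10, (1 << 10) - 1, 0, 0))
 *		return -ENOMEM;
 *	obj = mlx4_bitmap_alloc(&bitmap);
 *	if (obj != (u32)-1)
 *		mlx4_bitmap_free(&bitmap, obj, MLX4_NO_RR);
 *	mlx4_bitmap_cleanup(&bitmap);
 *
 * mlx4_bitmap_alloc() returns -1 (all ones) when the bitmap is exhausted.
 */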

int mlx4_reset(struct mlx4_dev *dev);

int mlx4_alloc_eq_table(struct mlx4_dev *dev);
void mlx4_free_eq_table(struct mlx4_dev *dev);

int mlx4_init_pd_table(struct mlx4_dev *dev);
int mlx4_init_xrcd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
int mlx4_init_eq_table(struct mlx4_dev *dev);
int mlx4_init_cq_table(struct mlx4_dev *dev);
int mlx4_init_qp_table(struct mlx4_dev *dev);
int mlx4_init_srq_table(struct mlx4_dev *dev);
int mlx4_init_mcg_table(struct mlx4_dev *dev);

void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev);
void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param);

struct mlx4_dev_cap;
struct mlx4_init_hca_param;

u64 mlx4_make_profile(struct mlx4_dev *dev,
		      struct mlx4_profile *request,
		      struct mlx4_dev_cap *dev_cap,
		      struct mlx4_init_hca_param *init_hca);
void mlx4_master_comm_channel(struct work_struct *work);
void mlx4_gen_slave_eqe(struct work_struct *work);
void mlx4_master_handle_slave_flr(struct work_struct *work);

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd);
int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd);
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd);
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd);
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd);
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd);
int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);

int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);

enum {
	MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
	MLX4_CMD_CLEANUP_POOL	= 1UL << 1,
	MLX4_CMD_CLEANUP_HCR	= 1UL << 2,
	MLX4_CMD_CLEANUP_VHCR	= 1UL << 3,
	MLX4_CMD_CLEANUP_ALL	= (MLX4_CMD_CLEANUP_VHCR << 1) - 1
};

int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
int mlx4_multi_func_init(struct mlx4_dev *dev);
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout);

void mlx4_cq_tasklet_cb(unsigned long data);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);

void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);

void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);

void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);

int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
		    enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
			 enum mlx4_port_type *stype,
			 enum mlx4_port_type *defaults);
void mlx4_start_sense(struct mlx4_dev *dev);
void mlx4_stop_sense(struct mlx4_dev *dev);
void mlx4_sense_init(struct mlx4_dev *dev);
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type);
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types);

void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table);
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_bond_vlan_table(struct mlx4_dev *dev);
int mlx4_unbond_vlan_table(struct mlx4_dev *dev);
int mlx4_bond_mac_table(struct mlx4_dev *dev);
int mlx4_unbond_mac_table(struct mlx4_dev *dev);

int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource resource_type,
				    u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type);

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len);

int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer);
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd);
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
			      int port, void *buf);
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
			       struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd);
int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd);
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd);
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);

static inline void set_param_l(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
}

static inline void set_param_h(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff) | ((u64) val << 32);
}

static inline u32 get_param_l(u64 *arg)
{
	return (u32) (*arg & 0xffffffff);
}

static inline u32 get_param_h(u64 *arg)
{
	return (u32) (*arg >> 32);
}
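
/*
 * Worked example (illustrative): the helpers above pack and unpack two 32-bit
 * values in a single u64, e.g. a command's out_param:
 *
 *	u64 out_param = 0;
 *
 *	set_param_l(&out_param, qpn);        (low 32 bits)
 *	set_param_h(&out_param, count);      (high 32 bits)
 *	...
 *	qpn   = get_param_l(&out_param);
 *	count = get_param_h(&out_param);
 */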

static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
{
	return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
}

#define NOT_MASKED_PD_BITS 17

void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);

void mlx4_init_quotas(struct mlx4_dev *dev);

/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
void mlx4_replace_zero_macs(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
int mlx4_bond_fs_rules(struct mlx4_dev *dev);
int mlx4_unbond_fs_rules(struct mlx4_dev *dev);

enum mlx4_zone_flags {
	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO	= 1UL << 0,
	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO	= 1UL << 1,
	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO	= 1UL << 2,
	MLX4_ZONE_USE_RR			= 1UL << 3,
};

enum mlx4_zone_alloc_flags {
	/* No two objects may overlap between zones. The UID
	 * may be left unused. If this flag is set and two
	 * overlapping zones are used, an object will be freed
	 * from the smallest possible matching zone.
	 */
	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP	= 1UL << 0,
};

struct mlx4_zone_allocator;

/* Create a new zone allocator */
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);

/* Attach an mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
 * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
 * Similarly, when searching for an object to free, this offset is taken into
 * account. The use_rr policy for allocating objects from this <bitmap>
 * is given through the MLX4_ZONE_USE_RR flag in <flags>.
 * When an allocation fails, <zone_alloc> tries to allocate from other zones
 * according to the policy set by <flags>. <puid> is the unique identifier
 * returned for this zone.
 */
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
		      struct mlx4_bitmap *bitmap,
		      u32 flags,
		      int priority,
		      int offset,
		      u32 *puid);

/* Remove bitmap indicated by <uid> from <zone_alloc> */
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);

/* Delete the zone allocator <zone_alloc>. This function doesn't destroy
 * the attached bitmaps.
 */
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);

/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
 * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually
 * allocated from is returned in <puid>. If the allocation fails, a negative
 * number is returned. Otherwise, the offset of the first object is returned.
 */
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
			    int align, u32 skip_mask, u32 *puid);

/* Free <count> objects starting from <obj> of the zone with uid <uid> in
 * zone allocator <zones>.
 */
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
			   u32 uid, u32 obj, u32 count);

/* If <zones> was created with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, the zone
 * allocator can work out the uid by itself instead of it being specified
 * when freeing an object. Other parameters are similar to
 * mlx4_zone_free_entries().
 */
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);

/* Returns a pointer to the mlx4_bitmap that was attached to <zones> with <uid> */
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
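
/*
 * Illustrative sketch of the zone allocator API above (assumes a bitmap that
 * was already set up with mlx4_bitmap_init(); the flags, priority, offset and
 * counts are made up, and error checking is omitted):
 *
 *	struct mlx4_zone_allocator *zones;
 *	u32 uid, obj;
 *
 *	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
 *	mlx4_zone_add_one(zones, &bitmap, MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO,
 *			  0, 0, &uid);
 *	obj = mlx4_zone_alloc_entries(zones, uid, 1, 1, 0, NULL);
 *	...
 *	mlx4_zone_free_entries_unique(zones, obj, 1);
 *	mlx4_zone_allocator_destroy(zones);
 */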

#endif /* MLX4_H */