/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <linux/pci.h>
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
#include "hns_roce_debugfs.h"

#define PCI_REVISION_ID_HIP08	0x21
#define PCI_REVISION_ID_HIP09	0x30

#define HNS_ROCE_MAX_MSG_LEN	0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE	6

#define BA_BYTE_LEN	8

#define HNS_ROCE_MIN_CQE_NUM	0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM	1

#define HNS_ROCE_MAX_IRQ_NUM	128

#define HNS_ROCE_SGE_IN_WQE	2
#define HNS_ROCE_SGE_SHIFT	4

#define EQ_ENABLE	1
#define EQ_DISABLE	0

#define HNS_ROCE_CEQ	0
#define HNS_ROCE_AEQ	1

#define HNS_ROCE_CEQE_SIZE	0x4
#define HNS_ROCE_AEQE_SIZE	0x10

#define HNS_ROCE_V3_EQE_SIZE	0x40

#define HNS_ROCE_V2_CQE_SIZE	32
#define HNS_ROCE_V3_CQE_SIZE	64

#define HNS_ROCE_V2_QPC_SZ	256
#define HNS_ROCE_V3_QPC_SZ	512

#define HNS_ROCE_MAX_PORTS	6
#define HNS_ROCE_GID_SIZE	16
#define HNS_ROCE_SGE_SIZE	16
#define HNS_ROCE_DWQE_SIZE	65536

#define HNS_ROCE_HOP_NUM_0	0xff

#define MR_TYPE_MR	0x00
#define MR_TYPE_FRMR	0x01
#define MR_TYPE_DMA	0x03

#define HNS_ROCE_FRMR_MAX_PA	512
#define HNS_ROCE_FRMR_ALIGN_SIZE	128

#define PKEY_ID	0xffff
#define NODE_DESC_SIZE	64
#define DB_REG_OFFSET	0x1000

/* Configured to HW when PAGE_SIZE is larger than 4KB */
#define PG_SHIFT_OFFSET	(PAGE_SHIFT - 12)

#define ATOMIC_WR_LEN	8

#define HNS_ROCE_IDX_QUE_ENTRY_SZ	4
#define SRQ_DB_REG	0x230

#define HNS_ROCE_QP_BANK_NUM	8
#define HNS_ROCE_CQ_BANK_NUM	4

#define CQ_BANKID_SHIFT	2
#define CQ_BANKID_MASK	GENMASK(1, 0)
#define VALID_CQ_BANK_MASK_DEFAULT	0xF
#define VALID_CQ_BANK_MASK_LIMIT	0x9

#define VALID_EXT_SGE_QP_BANK_MASK_LIMIT	0x42

#define HNS_ROCE_MAX_CQ_COUNT	0xFFFF
#define HNS_ROCE_MAX_CQ_PERIOD	0xFFFF
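/*
 * Illustrative sketch only, not a driver API: CQNs are spread across
 * the HNS_ROCE_CQ_BANK_NUM banks by their low-order bits, so a
 * hypothetical helper recovering the bank from a CQN could look like:
 */
static inline u8 example_cqn_to_bankid(unsigned long cqn)
{
	/* The low two bits (CQ_BANKID_MASK) of the CQN select the bank. */
	return (u8)(cqn & CQ_BANKID_MASK);
}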
enum {
	SERV_TYPE_RC,
	SERV_TYPE_UC,
	SERV_TYPE_RD,
	SERV_TYPE_UD,
	SERV_TYPE_XRC = 5,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
	/* 0x10 and 0x11 are unused in the current application case */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
	HNS_ROCE_EVENT_TYPE_MB = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
};

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
	HNS_ROCE_CAP_FLAG_XRC = BIT(6),
	HNS_ROCE_CAP_FLAG_MW = BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
	HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
	HNS_ROCE_CAP_FLAG_STASH = BIT(17),
	HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
	HNS_ROCE_CAP_FLAG_BOND = BIT(21),
	HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22),
	HNS_ROCE_CAP_FLAG_LIMIT_BANK = BIT(23),
};

#define HNS_ROCE_DB_TYPE_COUNT	2
#define HNS_ROCE_DB_UNIT_SIZE	4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
	HNS_ROCE_STATE_BOND_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN = 0,
};

#define HNS_ROCE_CMD_SUCCESS	1

#define HNS_ROCE_MAX_HOP_NUM	3
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT	12
#define HNS_HW_PAGE_SIZE	(1 << HNS_HW_PAGE_SHIFT)

#define HNS_HW_MAX_PAGE_SHIFT	27
#define HNS_HW_MAX_PAGE_SIZE	(1 << HNS_HW_MAX_PAGE_SHIFT)
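/*
 * Minimal sketch, assuming a hypothetical helper that is not part of
 * the driver: the device always addresses 4K hardware pages, so a
 * buffer counted in kernel pages grows by a factor of
 * 2^(PAGE_SHIFT - HNS_HW_PAGE_SHIFT) when expressed in hardware pages
 * (e.g. one 64K kernel page spans 16 hardware pages).
 */
static inline u32 example_kernel_to_hw_pages(u32 kernel_pages)
{
	return kernel_pages << (PAGE_SHIFT - HNS_HW_PAGE_SHIFT);
}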
struct hns_roce_uar {
	u64 pfn;
	unsigned long index;
	unsigned long logic_idx;
};

enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,
	HNS_ROCE_MMAP_TYPE_DWQE,
};

struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type;
	u64 address;
};

struct hns_roce_ucontext {
	struct ib_ucontext ibucontext;
	struct hns_roce_uar uar;
	struct list_head page_list;
	struct mutex page_mutex;
	struct hns_user_mmap_entry *db_mmap_entry;
	u32 config;
	u8 cq_bank_id;
};

struct hns_roce_pd {
	struct ib_pd ibpd;
	unsigned long pdn;
};

struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

struct hns_roce_bitmap {
	/* The last bit found to be 1 during bitmap traversal */
	unsigned long last;
	unsigned long top;
	unsigned long max;
	unsigned long reserved_top;
	unsigned long mask;
	spinlock_t lock;
	unsigned long *table;
};

struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32 type;
	/* HEM array element num */
	unsigned long num_hem;
	/* Single obj size */
	unsigned long obj_size;
	unsigned long table_chunk_size;
	struct mutex mutex;
	struct hns_roce_hem **hem;
	u64 **bt_l1;
	dma_addr_t *bt_l1_dma_addr;
	u64 **bt_l0;
	dma_addr_t *bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION	3
#define HNS_ROCE_MAX_BT_LEVEL	3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all bt dma mem by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

enum mtr_type {
	MTR_DEFAULT = 0,
	MTR_PBL,
};

struct hns_roce_buf_attr {
	struct {
		size_t size; /* region size */
		int hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift; /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	u64 iova;
	enum mtr_type type;
	bool mtt_only; /* only alloc buffer-required MTT memory */
	bool adaptive; /* adaptive for page_shift and hopnum */
};

struct hns_roce_hem_cfg {
	dma_addr_t root_ba; /* root BA table's address */
	bool is_direct; /* addressing without BA table */
	unsigned int ba_pg_shift; /* BA table page shift */
	unsigned int buf_pg_shift; /* buffer page shift */
	unsigned int buf_pg_count; /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count;
};

/* memory translation region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem *umem; /* user space buffer */
	struct hns_roce_buf *kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
};
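/*
 * Minimal usage sketch with assumed values (the helper and the hop
 * number are illustrative, not taken from the driver): callers
 * describe their buffer layout to the MTR layer by filling a
 * hns_roce_buf_attr before creating the MTR.
 */
static inline void example_fill_buf_attr(struct hns_roce_buf_attr *attr,
					 size_t size)
{
	attr->region[0].size = size;
	attr->region[0].hopnum = 2; /* assumed: two-hop page tables */
	attr->region_count = 1;
	attr->page_shift = HNS_HW_PAGE_SHIFT; /* 4K hardware pages */
	attr->type = MTR_DEFAULT;
}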
struct hns_roce_mr {
	struct ib_mr ibmr;
	u64 iova; /* MR's original virtual addr */
	u64 size; /* Address range of MR */
	u32 key; /* Key of MR */
	u32 pd; /* PD num of MR */
	u32 access; /* Access permission of MR */
	int enabled; /* MR's active status */
	int type; /* MR's register type */
	u32 pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr pbl_mtr;
	u32 npages;
	dma_addr_t *page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_ida mtpt_ida;
	struct hns_roce_hem_table mtpt_table;
};

struct hns_roce_wq {
	u64 *wrid; /* Work request ID */
	spinlock_t lock;
	u32 wqe_cnt; /* WQE num */
	u32 max_gs;
	u32 rsv_sge;
	u32 offset;
	u32 wqe_shift; /* WQE size */
	u32 head;
	u32 tail;
	void __iomem *db_reg;
	u32 ext_sge_cnt;
};

struct hns_roce_sge {
	unsigned int sge_cnt; /* SGE num */
	u32 offset;
	u32 sge_shift; /* SGE size */
};

struct hns_roce_buf_list {
	void *buf;
	dma_addr_t map;
};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all of the memory must be in a
 * contiguous dma address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation fails only when the
 * allocated size is zero; it may succeed even if the allocated size is
 * smaller than the required size.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};

struct hns_roce_buf {
	struct hns_roce_buf_list *trunk_list;
	u32 ntrunks;
	u32 npages;
	unsigned int trunk_shift;
	unsigned int page_shift;
};
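/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * the total buffer length follows from the trunk geometry above,
 * assuming each of the ntrunks trunks covers 1 << trunk_shift bytes.
 */
static inline size_t example_buf_total_size(struct hns_roce_buf *buf)
{
	return (size_t)buf->ntrunks << buf->trunk_shift;
}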
struct hns_roce_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT];
	u32 *page;
	dma_addr_t db_dma;
};

struct hns_roce_user_db_page {
	struct list_head list;
	struct ib_umem *umem;
	unsigned long user_virt;
	refcount_t refcount;
};

struct hns_roce_db {
	u32 *db_record;
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	void *virt_addr;
	unsigned long index;
	unsigned long order;
};

struct hns_roce_cq {
	struct ib_cq ib_cq;
	struct hns_roce_mtr mtr;
	struct hns_roce_db db;
	u32 flags;
	spinlock_t lock;
	u32 cq_depth;
	u32 cons_index;
	u32 *set_ci_db;
	void __iomem *db_reg;
	int arm_sn;
	int cqe_size;
	unsigned long cqn;
	u32 vector;
	refcount_t refcount;
	struct completion free;
	struct list_head sq_list; /* all qps on this send cq */
	struct list_head rq_list; /* all qps on this recv cq */
	int is_armed; /* cq is armed */
	struct list_head node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr mtr;
	u32 entry_shift;
	unsigned long *bitmap;
	u32 head;
	u32 tail;
};

struct hns_roce_srq {
	struct ib_srq ibsrq;
	unsigned long srqn;
	u32 wqe_cnt;
	int max_gs;
	u32 rsv_sge;
	u32 wqe_shift;
	u32 cqn;
	u32 xrcdn;
	void __iomem *db_reg;

	refcount_t refcount;
	struct completion free;

	struct hns_roce_mtr buf_mtr;

	u64 *wrid;
	struct hns_roce_idx_que idx_que;
	spinlock_t lock;
	struct mutex mutex;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
	struct hns_roce_db rdb;
	u32 cap_flags;
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};
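/*
 * Illustrative sketch, not the driver's allocator: IDs can be balanced
 * across a bank array by picking the least-loaded bank, using the
 * inuse counter above.
 */
static inline u8 example_least_loaded_bank(struct hns_roce_bank *bank,
					   u8 bank_num)
{
	u8 i, bankid = 0;
	u32 least = bank[0].inuse;

	for (i = 1; i < bank_num; i++) {
		if (bank[i].inuse < least) {
			least = bank[i].inuse;
			bankid = i;
		}
	}

	return bankid;
}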
struct hns_roce_qp_table {
	struct hns_roce_hem_table qp_table;
	struct hns_roce_hem_table irrl_table;
	struct hns_roce_hem_table trrl_table;
	struct hns_roce_hem_table sccc_table;
	struct mutex scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
	struct xarray dip_xa;
};

struct hns_roce_cq_table {
	struct xarray array;
	struct hns_roce_hem_table table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex bank_mutex;
	u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
	u8 valid_cq_bank_mask;
};

struct hns_roce_srq_table {
	struct hns_roce_ida srq_ida;
	struct xarray xa;
	struct hns_roce_hem_table table;
};

struct hns_roce_av {
	u8 port;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	u32 flowlabel;
	u16 udp_sport;
	u8 sl;
	u8 tclass;
	u8 dgid[HNS_ROCE_GID_SIZE];
	u8 mac[ETH_ALEN];
	u16 vlan_id;
	u8 vlan_en;
};

struct hns_roce_ah {
	struct ib_ah ibah;
	struct hns_roce_av av;
};

struct hns_roce_cmd_context {
	struct completion done;
	int result;
	int next;
	u64 out_param;
	u16 token;
	u16 busy;
};

enum hns_roce_cmdq_state {
	HNS_ROCE_CMDQ_STATE_NORMAL,
	HNS_ROCE_CMDQ_STATE_FATAL_ERR,
};

struct hns_roce_cmdq {
	struct dma_pool *pool;
	struct semaphore poll_sem;
	/*
	 * Event mode: event_sem protects command slot usage, ensuring
	 * that no more than max_cmds commands are outstanding and that
	 * users stay within their allowed limit.
	 */
	struct semaphore event_sem;
	int max_cmds;
	spinlock_t context_lock;
	int free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Whether to use event mode. The driver starts in poll
	 * (non-event) mode; once the cmd event queue is ready it can
	 * switch to event mode, and it switches back to poll mode when
	 * the device is closed.
	 */
	u8 use_events;
	enum hns_roce_cmdq_state state;
};

struct hns_roce_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
};

struct hns_roce_mbox_msg {
	u64 in_param;
	u64 out_param;
	u8 cmd;
	u32 tag;
	u16 token;
	u8 event_en;
};

struct hns_roce_dev;

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
	HNS_ROCE_STOP_FLUSH_FLAG = 1,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	int event_type;
	int sub_type;
	u32 queue_num;
};

enum hns_roce_cong_type {
	CONG_TYPE_DCQCN,
	CONG_TYPE_LDCP,
	CONG_TYPE_HC3,
	CONG_TYPE_DIP,
};

struct hns_roce_qp {
	struct ib_qp ibqp;
	struct hns_roce_wq rq;
	struct hns_roce_db rdb;
	struct hns_roce_db sdb;
	unsigned long en_flags;
	enum ib_sig_type sq_signal_bits;
	struct hns_roce_wq sq;

	struct hns_roce_mtr mtr;

	u32 buff_size;
	struct mutex mutex;
	u8 port;
	u8 phy_port;
	u8 sl;
	u8 resp_depth;
	u8 state;
	u32 atomic_rd_en;
	u32 qkey;
	void (*event)(struct hns_roce_qp *qp,
		      enum hns_roce_event event_type);
	unsigned long qpn;

	u32 xrcdn;

	refcount_t refcount;
	struct completion free;

	struct hns_roce_sge sge;
	u32 next_sge;
	enum ib_mtu path_mtu;
	u32 max_inline_data;
	u8 free_mr_en;

	/* 0: flush needed, 1: unneeded */
	unsigned long flush_flag;
	struct hns_roce_work flush_work;
	struct list_head node; /* all qps are on a list */
	struct list_head rq_node; /* all recv qps are on a list */
	struct list_head sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
	u32 config;
	enum hns_roce_cong_type cong_type;
	u8 tc_mode;
	u8 priority;
	spinlock_t flush_lock;
	struct hns_roce_dip *dip;
};

struct hns_roce_ib_iboe {
	spinlock_t lock;
	struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block nb;
	u8 phy_port[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_ceqe {
	__le32 comp;
	__le32 rsv[15];
};

#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)

#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)

struct hns_roce_aeqe {
	__le32 asyn;
	union {
		struct {
			__le32 num;
			u32 rsv0;
			u32 rsv1;
		} queue_event;

		struct {
			__le64 out_param;
			__le16 token;
			u8 status;
			u8 rsv0;
		} __packed cmd;
	} event;
	__le32 rsv[12];
};

#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)

#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)
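/*
 * FIELD_LOC() and the hr_reg_*() accessors live in hns_roce_common.h;
 * the CEQE/AEQE fields above are decoded with them, e.g. (sketch):
 *
 *	event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
 *	sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
 */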
struct hns_roce_eq {
	struct hns_roce_dev *hr_dev;
	void __iomem *db_reg;

	int type_flag; /* Aeq:1 ceq:0 */
	int eqn;
	u32 entries;
	int eqe_size;
	int irq;
	u32 cons_index;
	int over_ignore;
	int coalesce;
	int arm_st;
	int hop_num;
	struct hns_roce_mtr mtr;
	u16 eq_max_cnt;
	u32 eq_period;
	int shift;
	int event_type;
	int sub_type;
	struct work_struct work;
};

struct hns_roce_eq_table {
	struct hns_roce_eq *eq;
};

struct hns_roce_caps {
	u64 fw_ver;
	u8 num_ports;
	int gid_table_len[HNS_ROCE_MAX_PORTS];
	int pkey_table_len[HNS_ROCE_MAX_PORTS];
	int local_ca_ack_delay;
	int num_uars;
	u32 phy_num_uars;
	u32 max_sq_sg;
	u32 max_sq_inline;
	u32 max_rq_sg;
	u32 rsv0;
	u32 num_qps;
	u32 reserved_qps;
	u32 num_srqs;
	u32 max_wqes;
	u32 max_srq_wrs;
	u32 max_srq_sges;
	u32 max_sq_desc_sz;
	u32 max_rq_desc_sz;
	u32 rsv2;
	int max_qp_init_rdma;
	int max_qp_dest_rdma;
	u32 num_cqs;
	u32 max_cqes;
	u32 min_cqes;
	u32 min_wqes;
	u32 reserved_cqs;
	u32 reserved_srqs;
	int num_aeq_vectors;
	int num_comp_vectors;
	int num_other_vectors;
	u32 num_mtpts;
	u32 rsv1;
	u32 num_srqwqe_segs;
	u32 num_idx_segs;
	int reserved_mrws;
	int reserved_uars;
	int num_pds;
	int reserved_pds;
	u32 num_xrcds;
	u32 reserved_xrcds;
	u32 mtt_entry_sz;
	u32 cqe_sz;
	u32 page_size_cap;
	u32 reserved_lkey;
	int mtpt_entry_sz;
	int qpc_sz;
	int irrl_entry_sz;
	int trrl_entry_sz;
	int cqc_entry_sz;
	int sccc_sz;
	int qpc_timer_entry_sz;
	int cqc_timer_entry_sz;
	int srqc_entry_sz;
	int idx_entry_sz;
	u32 pbl_ba_pg_sz;
	u32 pbl_buf_pg_sz;
	u32 pbl_hop_num;
	int aeqe_depth;
	int ceqe_depth;
	u32 aeqe_size;
	u32 ceqe_size;
	enum ib_mtu max_mtu;
	u32 qpc_bt_num;
	u32 qpc_timer_bt_num;
	u32 srqc_bt_num;
	u32 cqc_bt_num;
	u32 cqc_timer_bt_num;
	u32 mpt_bt_num;
	u32 eqc_bt_num;
	u32 smac_bt_num;
	u32 sgid_bt_num;
	u32 sccc_bt_num;
	u32 gmv_bt_num;
	u32 qpc_ba_pg_sz;
	u32 qpc_buf_pg_sz;
	u32 qpc_hop_num;
	u32 srqc_ba_pg_sz;
	u32 srqc_buf_pg_sz;
	u32 srqc_hop_num;
	u32 cqc_ba_pg_sz;
	u32 cqc_buf_pg_sz;
	u32 cqc_hop_num;
	u32 mpt_ba_pg_sz;
	u32 mpt_buf_pg_sz;
	u32 mpt_hop_num;
	u32 mtt_ba_pg_sz;
	u32 mtt_buf_pg_sz;
	u32 mtt_hop_num;
	u32 wqe_sq_hop_num;
	u32 wqe_sge_hop_num;
	u32 wqe_rq_hop_num;
	u32 sccc_ba_pg_sz;
	u32 sccc_buf_pg_sz;
	u32 sccc_hop_num;
	u32 qpc_timer_ba_pg_sz;
	u32 qpc_timer_buf_pg_sz;
	u32 qpc_timer_hop_num;
	u32 cqc_timer_ba_pg_sz;
	u32 cqc_timer_buf_pg_sz;
	u32 cqc_timer_hop_num;
	u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
	u32 cqe_buf_pg_sz;
	u32 cqe_hop_num;
	u32 srqwqe_ba_pg_sz;
	u32 srqwqe_buf_pg_sz;
	u32 srqwqe_hop_num;
	u32 idx_ba_pg_sz;
	u32 idx_buf_pg_sz;
	u32 idx_hop_num;
	u32 eqe_ba_pg_sz;
	u32 eqe_buf_pg_sz;
	u32 eqe_hop_num;
	u32 gmv_entry_num;
	u32 gmv_entry_sz;
	u32 gmv_ba_pg_sz;
	u32 gmv_buf_pg_sz;
	u32 gmv_hop_num;
	u32 sl_num;
	u32 llm_buf_pg_sz;
	u32 chunk_sz; /* chunk size in non multihop mode */
	u64 flags;
	u16 default_ceq_max_cnt;
	u16 default_ceq_period;
	u16 default_aeq_max_cnt;
	u16 default_aeq_period;
	u16 default_aeq_arm_st;
	u16 default_ceq_arm_st;
	u8 cong_cap;
	enum hns_roce_cong_type default_cong_type;
	u32 max_ack_req_msg_len;
};

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};

enum hns_roce_hw_pkt_stat_index {
	HNS_ROCE_HW_RX_RC_PKT_CNT,
	HNS_ROCE_HW_RX_UC_PKT_CNT,
	HNS_ROCE_HW_RX_UD_PKT_CNT,
	HNS_ROCE_HW_RX_XRC_PKT_CNT,
	HNS_ROCE_HW_RX_PKT_CNT,
	HNS_ROCE_HW_RX_ERR_PKT_CNT,
	HNS_ROCE_HW_RX_CNP_PKT_CNT,
	HNS_ROCE_HW_TX_RC_PKT_CNT,
	HNS_ROCE_HW_TX_UC_PKT_CNT,
	HNS_ROCE_HW_TX_UD_PKT_CNT,
	HNS_ROCE_HW_TX_XRC_PKT_CNT,
	HNS_ROCE_HW_TX_PKT_CNT,
	HNS_ROCE_HW_TX_ERR_PKT_CNT,
	HNS_ROCE_HW_TX_CNP_PKT_CNT,
	HNS_ROCE_HW_TRP_GET_MPT_ERR_PKT_CNT,
	HNS_ROCE_HW_TRP_GET_IRRL_ERR_PKT_CNT,
	HNS_ROCE_HW_ECN_DB_CNT,
	HNS_ROCE_HW_RX_BUF_CNT,
	HNS_ROCE_HW_TRP_RX_SOF_CNT,
	HNS_ROCE_HW_CQ_CQE_CNT,
	HNS_ROCE_HW_CQ_POE_CNT,
	HNS_ROCE_HW_CQ_NOTIFY_CNT,
	HNS_ROCE_HW_CNT_TOTAL
};

enum hns_roce_sw_dfx_stat_index {
	HNS_ROCE_DFX_AEQE_CNT,
	HNS_ROCE_DFX_CEQE_CNT,
	HNS_ROCE_DFX_CMDS_CNT,
	HNS_ROCE_DFX_CMDS_ERR_CNT,
	HNS_ROCE_DFX_MBX_POSTED_CNT,
	HNS_ROCE_DFX_MBX_POLLED_CNT,
	HNS_ROCE_DFX_MBX_EVENT_CNT,
	HNS_ROCE_DFX_QP_CREATE_ERR_CNT,
	HNS_ROCE_DFX_QP_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_CQ_CREATE_ERR_CNT,
	HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT,
	HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT,
	HNS_ROCE_DFX_MR_REG_ERR_CNT,
	HNS_ROCE_DFX_MR_REREG_ERR_CNT,
	HNS_ROCE_DFX_AH_CREATE_ERR_CNT,
	HNS_ROCE_DFX_MMAP_ERR_CNT,
	HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT,
	HNS_ROCE_DFX_CNT_TOTAL
};
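/*
 * The software counters above index the per-device atomic64 array
 * hr_dev->dfx_cnt (see struct hns_roce_dev below); a caller bumps one
 * roughly like (sketch):
 *
 *	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
 */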
struct hns_roce_hw {
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_mbox_msg *mbox_msg);
	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
		       const u8 *addr);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, u32 step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 u32 step_idx);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
				    struct hns_roce_qp *hr_qp);
	void (*dereg_mr)(struct hns_roce_dev *hr_dev);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
	int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
	int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
	int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
				u64 *stats, u32 port, int *hw_counters);
	int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
			u8 *tc_mode, u8 *priority);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};
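/*
 * Each hardware generation supplies one ops table; for example the
 * HIP08/HIP09 backend in hns_roce_hw_v2.c wires it up roughly like
 * (abridged sketch):
 *
 *	static const struct hns_roce_hw hns_roce_hw_v2 = {
 *		.cmq_init = hns_roce_v2_cmq_init,
 *		.hw_profile = hns_roce_v2_profile,
 *		...
 *	};
 */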
struct hns_roce_dev {
	struct ib_device ib_dev;
	struct pci_dev *pci_dev;
	struct device *dev;
	struct hns_roce_uar priv_uar;
	const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t sm_lock;
	bool active;
	bool is_reset;
	bool dis_db;
	unsigned long reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head qp_list; /* list of all qps on this dev */
	spinlock_t qp_list_lock; /* protect qp_list */

	struct list_head pgdir_list;
	struct mutex pgdir_mutex;
	int irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem *reg_base;
	void __iomem *mem_base;
	struct hns_roce_caps caps;
	struct xarray qp_table_xa;

	unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64 sys_image_guid;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_rev;
	void __iomem *priv_addr;

	struct hns_roce_cmdq cmd;
	struct hns_roce_ida pd_ida;
	struct hns_roce_ida xrcd_ida;
	struct hns_roce_ida uar_ida;
	struct hns_roce_mr_table mr_table;
	struct hns_roce_cq_table cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table qp_table;
	struct hns_roce_eq_table eq_table;
	struct hns_roce_hem_table qpc_timer_table;
	struct hns_roce_hem_table cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table gmv_table;

	int cmd_mod;
	int loop_idc;
	u32 sdb_offset;
	u32 odb_offset;
	const struct hns_roce_hw *hw;
	void *priv;
	struct workqueue_struct *irq_workq;
	struct work_struct ecc_work;
	u32 func_num;
	u32 is_vf;
	u32 cong_algo_tmpl_id;
	u64 dwqe_page;
	struct hns_roce_dev_debugfs dbgfs;
	atomic64_t *dfx_cnt;
};

enum hns_roce_trace_type {
	TRACE_SQ,
	TRACE_RQ,
	TRACE_SRQ,
};

static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
{
	switch (type) {
	case TRACE_SQ:
		return "SQ";
	case TRACE_RQ:
		return "RQ";
	case TRACE_SRQ:
		return "SRQ";
	default:
		return "UNKNOWN";
	}
}

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}
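/*
 * Worked example for the trunk arithmetic above, assuming
 * trunk_shift = 16 (64K trunks) and page_shift = 12 (4K pages): byte
 * offset 0x12345 falls in trunk 0x12345 >> 16 = 1, at byte
 * 0x12345 & 0xffff = 0x2345 within that trunk, and page index 5 maps
 * to byte offset 5 << 12 = 0x5000.
 */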
#define hr_hw_page_align(x)	ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
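/*
 * Worked example with assumed values: for count = 100 entries of
 * 64 bytes each (buf_shift = 6), to_hr_hem_entries_size() rounds
 * 100 << 6 = 6400 bytes up to the 4K-aligned size 8192, and
 * to_hr_hem_entries_count() converts that back to 8192 >> 6 = 128
 * entries, of which to_hr_hem_entries_shift() is ilog2(128) = 7.
 */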
#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}

static inline struct net_device *get_hr_netdev(struct hns_roce_dev *hr_dev,
					       u8 port)
{
	return hr_dev->iboe.netdevs[port];
}

static inline u8 get_hr_bus_num(struct hns_roce_dev *hr_dev)
{
	return hr_dev->pci_dev->bus->number;
}

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* The hns roce hw needs the current and the next block addr from the mtt */
#define MTT_MIN_COUNT	2
static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
{
	return mtr->hem_cfg.root_ba;
}

int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);

void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_dmah *dmah,
				   struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct uverbs_attr_bundle *attrs);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
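/*
 * hns_roce_lock_cqs()/hns_roce_unlock_cqs() take both CQ locks in a
 * consistent order so the QP lists of a QP's send and recv CQs can be
 * updated atomically; a hypothetical caller pairs them like (sketch):
 *
 *	hns_roce_lock_cqs(send_cq, recv_cq);
 *	... detach the QP from both CQs' lists ...
 *	hns_roce_unlock_cqs(send_cq, recv_cq);
 */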
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq);
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);

#endif /* _HNS_ROCE_DEVICE_H */