/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

#define MAC_ADDR_OCTET_NUM 6
#define HNS_ROCE_MAX_MSG_LEN 0x80000000

#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
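/*
 * Illustrative only: HNS_ROCE_ALOGN_UP(a, b) rounds a up to the next
 * multiple of b, e.g. HNS_ROCE_ALOGN_UP(100, 64) = 128, while an
 * already aligned value is unchanged: HNS_ROCE_ALOGN_UP(128, 64) = 128.
 */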

#define HNS_ROCE_IB_MIN_SQ_STRIDE 6

#define HNS_ROCE_BA_SIZE (32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000

#define HNS_ROCE_MAX_IRQ_NUM 34

#define HNS_ROCE_COMP_VEC_NUM 32

#define HNS_ROCE_AEQE_VEC_NUM 1
#define HNS_ROCE_AEQE_OF_VEC_NUM 1

/* 4G / 4K = 1M, i.e. the flow label is a 20-bit field */
#define HNS_ROCE_SL_SHIFT 29
#define HNS_ROCE_TCLASS_SHIFT 20
#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff

#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16

#define MR_TYPE_MR 0x00
#define MR_TYPE_DMA 0x03

#define PKEY_ID 0xffff
#define NODE_DESC_SIZE 64

#define SERV_TYPE_RC 0
#define SERV_TYPE_RD 1
#define SERV_TYPE_UC 2
#define SERV_TYPE_UD 3

#define PAGES_SHIFT_8 8
#define PAGES_SHIFT_16 16
#define PAGES_SHIFT_24 24
#define PAGES_SHIFT_32 32

enum hns_roce_qp_state {
        HNS_ROCE_QP_STATE_RST,
        HNS_ROCE_QP_STATE_INIT,
        HNS_ROCE_QP_STATE_RTR,
        HNS_ROCE_QP_STATE_RTS,
        HNS_ROCE_QP_STATE_SQD,
        HNS_ROCE_QP_STATE_ERR,
        HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
        HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
        HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
        HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
        HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
        HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
        HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
        HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
        HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
        HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
        HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
        HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
        HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
        HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
        HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
        /* 0x10 and 0x11 are unused in the current application */
        HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
        HNS_ROCE_EVENT_TYPE_MB = 0x13,
        HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
};

/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
enum {
        HNS_ROCE_LWQCE_QPC_ERROR = 1,
        HNS_ROCE_LWQCE_MTU_ERROR = 2,
        HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR = 3,
        HNS_ROCE_LWQCE_WQE_ADDR_ERROR = 4,
        HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR = 5,
        HNS_ROCE_LWQCE_SL_ERROR = 6,
        HNS_ROCE_LWQCE_PORT_ERROR = 7,
};

/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
enum {
        HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
        HNS_ROCE_LAVWQE_LENGTH_ERROR = 2,
        HNS_ROCE_LAVWQE_VA_ERROR = 3,
        HNS_ROCE_LAVWQE_PD_ERROR = 4,
        HNS_ROCE_LAVWQE_RW_ACC_ERROR = 5,
        HNS_ROCE_LAVWQE_KEY_STATE_ERROR = 6,
        HNS_ROCE_LAVWQE_MR_OPERATION_ERROR = 7,
};

/* Doorbell overflow subtype */
enum {
        HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
        HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF = 2,
        HNS_ROCE_DB_SUBTYPE_ODB_OVF = 3,
        HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF = 4,
        HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP = 5,
        HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP = 6,
};

enum {
        /* RQ & SRQ related operations */
        HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
        HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};

#define HNS_ROCE_CMD_SUCCESS 1

#define HNS_ROCE_PORT_DOWN 0
#define HNS_ROCE_PORT_UP 1

#define HNS_ROCE_MTT_ENTRY_PER_SEG 8

#define PAGE_ADDR_SHIFT 12

struct hns_roce_uar {
        u64 pfn;
        unsigned long index;
};

struct hns_roce_ucontext {
        struct ib_ucontext ibucontext;
        struct hns_roce_uar uar;
};

struct hns_roce_pd {
        struct ib_pd ibpd;
        unsigned long pdn;
};

struct hns_roce_bitmap {
        /* Last bit touched by a bitmap traversal; the next search starts here */
        unsigned long last;
        unsigned long top;
        unsigned long max;
        unsigned long reserved_top;
        unsigned long mask;
        spinlock_t lock;
        unsigned long *table;
};

/*
 * Buddy allocator bitmaps. The order-n bitmap holds 1 << (max_order - n)
 * bits: order 0 is the largest bitmap and order max_order consists of a
 * single bit. Each bit records the free/used state of one buddy block.
 * Initially every bit is 0 except the single max_order bit, which is 1.
 * Bit = 1: block is free and available; bit = 0: not available.
 */
struct hns_roce_buddy {
        /* One bitmap pointer per order level */
        unsigned long **bits;
        /* Number of free bits at each order level */
        u32 *num_free;
        int max_order;
        spinlock_t lock;
};
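/*
 * Worked example (illustrative only): with max_order = 2 there are three
 * bitmaps: order 0 has four bits, order 1 has two and order 2 has one.
 * An order-1 allocation (two base segments) splits the initial order-2
 * block: the order-2 bit is cleared, both order-1 bits are set, and the
 * allocation then clears one of them, leaving its buddy free.
 */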

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
        /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
        u32 type;
        /* HEM array element count */
        unsigned long num_hem;
        /* Total number of objects recorded by the HEM entries */
        unsigned long num_obj;
        /* Size of a single object */
        unsigned long obj_size;
        int lowmem;
        struct mutex mutex;
        struct hns_roce_hem **hem;
};

struct hns_roce_mtt {
        unsigned long first_seg;
        int order;
        int page_shift;
};

/* Only 4K page size is supported for MR registration */
#define MR_SIZE_4K 0

struct hns_roce_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        u64 iova; /* MR's virtual original address */
        u64 size; /* Address range of MR */
        u32 key; /* Key of MR */
        u32 pd; /* PD num of MR */
        u32 access; /* Access permission of MR */
        int enabled; /* MR's active status */
        int type; /* MR's register type */
        u64 *pbl_buf; /* MR's PBL space */
        dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
};

struct hns_roce_mr_table {
        struct hns_roce_bitmap mtpt_bitmap;
        struct hns_roce_buddy mtt_buddy;
        struct hns_roce_hem_table mtt_table;
        struct hns_roce_hem_table mtpt_table;
};

struct hns_roce_wq {
        u64 *wrid; /* Work request ID */
        spinlock_t lock;
        int wqe_cnt; /* WQE num */
        u32 max_post;
        int max_gs;
        int offset;
        int wqe_shift; /* WQE size is 1 << wqe_shift */
        u32 head;
        u32 tail;
        void __iomem *db_reg_l;
};

struct hns_roce_buf_list {
        void *buf;
        dma_addr_t map;
};

struct hns_roce_buf {
        struct hns_roce_buf_list direct;
        struct hns_roce_buf_list *page_list;
        int nbufs;
        u32 npages;
        int page_shift;
};

struct hns_roce_cq_buf {
        struct hns_roce_buf hr_buf;
        struct hns_roce_mtt hr_mtt;
};

struct hns_roce_cq_resize {
        struct hns_roce_cq_buf hr_buf;
        int cqe;
};

struct hns_roce_cq {
        struct ib_cq ib_cq;
        struct hns_roce_cq_buf hr_buf;
        /* Holds the new buffer's information while a resize is in flight */
        struct hns_roce_cq_resize *hr_resize_buf;
        spinlock_t lock;
        struct mutex resize_mutex;
        struct ib_umem *umem;
        struct ib_umem *resize_umem;
        void (*comp)(struct hns_roce_cq *);
        void (*event)(struct hns_roce_cq *, enum hns_roce_event);

        struct hns_roce_uar *uar;
        u32 cq_depth;
        u32 cons_index;
        void __iomem *cq_db_l;
        void __iomem *tptr_addr;
        unsigned long cqn;
        u32 vector;
        atomic_t refcount;
        struct completion free;
};
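/*
 * Polling sketch (assumes the usual power-of-two ring indexing; not a
 * statement of the exact v1 hardware contract): the next CQE to consume
 * sits at cons_index & (cq_depth - 1); after reaping it the driver
 * increments cons_index and publishes the new value through cq_db_l.
 */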

struct hns_roce_srq {
        struct ib_srq ibsrq;
        int srqn;
};

struct hns_roce_uar_table {
        struct hns_roce_bitmap bitmap;
};

struct hns_roce_qp_table {
        struct hns_roce_bitmap bitmap;
        spinlock_t lock;
        struct hns_roce_hem_table qp_table;
        struct hns_roce_hem_table irrl_table;
};

struct hns_roce_cq_table {
        struct hns_roce_bitmap bitmap;
        spinlock_t lock;
        struct radix_tree_root tree;
        struct hns_roce_hem_table table;
};

struct hns_roce_raq_table {
        struct hns_roce_buf_list *e_raq_buf;
};

struct hns_roce_av {
        __le32 port_pd;
        u8 gid_index;
        u8 stat_rate;
        u8 hop_limit;
        __le32 sl_tclass_flowlabel;
        u8 dgid[HNS_ROCE_GID_SIZE];
        u8 mac[6];
        __le16 vlan;
};

struct hns_roce_ah {
        struct ib_ah ibah;
        struct hns_roce_av av;
};

struct hns_roce_cmd_context {
        struct completion done;
        int result;
        int next;
        u64 out_param;
        u16 token;
};

struct hns_roce_cmdq {
        struct dma_pool *pool;
        u8 __iomem *hcr;
        struct mutex hcr_mutex;
        struct semaphore poll_sem;
        /*
         * Event mode: protects the command context region so that no
         * more than max_cmds commands are outstanding at once
         */
        struct semaphore event_sem;
        int max_cmds;
        spinlock_t context_lock;
        int free_head;
        struct hns_roce_cmd_context *context;
        /*
         * Mask applied to command tokens; derived from max_cmds rounded
         * up to a power of two
         */
        u16 token_mask;
        /*
         * Whether commands are processed in event mode. Defaults to
         * polling mode; once the command event queue is ready the driver
         * switches to event mode, and back to polling (non-event) mode
         * when the device is closed.
         */
        u8 use_events;
        u8 toggle;
};

struct hns_roce_dev;

struct hns_roce_qp {
        struct ib_qp ibqp;
        struct hns_roce_buf hr_buf;
        struct hns_roce_wq rq;
        __le64 doorbell_qpn;
        __le32 sq_signal_bits;
        u32 sq_next_wqe;
        int sq_max_wqes_per_wr;
        int sq_spare_wqes;
        struct hns_roce_wq sq;

        struct ib_umem *umem;
        struct hns_roce_mtt mtt;
        u32 buff_size;
        struct mutex mutex;
        u8 port;
        u8 sl;
        u8 resp_depth;
        u8 state;
        u32 access_flags;
        u32 pkey_index;
        void (*event)(struct hns_roce_qp *,
                      enum hns_roce_event);
        unsigned long qpn;

        atomic_t refcount;
        struct completion free;
};

struct hns_roce_sqp {
        struct hns_roce_qp hr_qp;
};

struct hns_roce_ib_iboe {
        spinlock_t lock;
        struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
        struct notifier_block nb;
        struct notifier_block nb_inet;
        /* 16 GIDs are shared by the 6 ports in the v1 engine */
        union ib_gid gid_table[HNS_ROCE_MAX_GID_NUM];
        u8 phy_port[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_eq {
        struct hns_roce_dev *hr_dev;
        void __iomem *doorbell;

        int type_flag; /* AEQ: 1, CEQ: 0 */
        int eqn;
        u32 entries;
        int log_entries;
        int eqe_size;
        int irq;
        int log_page_size;
        int cons_index;
        struct hns_roce_buf_list *buf_list;
};

struct hns_roce_eq_table {
        struct hns_roce_eq *eq;
        void __iomem **eqc_base;
};

struct hns_roce_caps {
        u8 num_ports;
        int gid_table_len[HNS_ROCE_MAX_PORTS];
        int pkey_table_len[HNS_ROCE_MAX_PORTS];
        int local_ca_ack_delay;
        int num_uars;
        u32 phy_num_uars;
        u32 max_sq_sg; /* 2 */
        u32 max_sq_inline; /* 32 */
        u32 max_rq_sg; /* 2 */
        int num_qps; /* 256k */
        u32 max_wqes; /* 16k */
        u32 max_sq_desc_sz; /* 64 */
        u32 max_rq_desc_sz; /* 64 */
        int max_qp_init_rdma;
        int max_qp_dest_rdma;
        int sqp_start;
        int num_cqs;
        int max_cqes;
        int reserved_cqs;
        int num_aeq_vectors; /* 1 */
        int num_comp_vectors; /* 32 ceq */
        int num_other_vectors;
        int num_mtpts;
        u32 num_mtt_segs;
        int reserved_mrws;
        int reserved_uars;
        int num_pds;
        int reserved_pds;
        u32 mtt_entry_sz;
        u32 cq_entry_sz;
        u32 page_size_cap;
        u32 reserved_lkey;
        int mtpt_entry_sz;
        int qpc_entry_sz;
        int irrl_entry_sz;
        int cqc_entry_sz;
        int aeqe_depth;
        int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
        enum ib_mtu max_mtu;
};
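/*
 * Per-engine hardware operations table. The core driver stays
 * engine-agnostic and dispatches through these hooks, e.g. (sketch)
 * hr_dev->hw->hw_init(hr_dev); the v1 implementation is exported as
 * hns_roce_hw_v1 at the end of this header.
 */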
struct hns_roce_hw {
        int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
        void (*hw_profile)(struct hns_roce_dev *hr_dev);
        int (*hw_init)(struct hns_roce_dev *hr_dev);
        void (*hw_exit)(struct hns_roce_dev *hr_dev);
        void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
                        union ib_gid *gid);
        void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
        void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                        enum ib_mtu mtu);
        int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
                          unsigned long mtpt_idx);
        void (*write_cqc)(struct hns_roce_dev *hr_dev,
                          struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
                          dma_addr_t dma_handle, int nent, u32 vector);
        int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                        int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
        int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                         int attr_mask, enum ib_qp_state cur_state,
                         enum ib_qp_state new_state);
        int (*destroy_qp)(struct ib_qp *ibqp);
        int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         struct ib_send_wr **bad_wr);
        int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
        int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
        int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
        void *priv;
};

struct hns_roce_dev {
        struct ib_device ib_dev;
        struct platform_device *pdev;
        struct hns_roce_uar priv_uar;
        const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
        spinlock_t sm_lock;
        spinlock_t cq_db_lock;
        spinlock_t bt_cmd_lock;
        struct hns_roce_ib_iboe iboe;

        int irq[HNS_ROCE_MAX_IRQ_NUM];
        u8 __iomem *reg_base;
        struct hns_roce_caps caps;
        struct radix_tree_root qp_table_tree;

        unsigned char dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
        u64 sys_image_guid;
        u32 vendor_id;
        u32 vendor_part_id;
        u32 hw_rev;
        void __iomem *priv_addr;

        struct hns_roce_cmdq cmd;
        struct hns_roce_bitmap pd_bitmap;
        struct hns_roce_uar_table uar_table;
        struct hns_roce_mr_table mr_table;
        struct hns_roce_cq_table cq_table;
        struct hns_roce_qp_table qp_table;
        struct hns_roce_eq_table eq_table;

        int cmd_mod;
        int loop_idc;
        struct hns_roce_hw *hw;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
        return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
                        *to_hr_ucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
        return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
        return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}

static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
{
        __raw_writeq(*(u64 *) val, dest);
}

static inline struct hns_roce_qp
        *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
        return radix_tree_lookup(&hr_dev->qp_table_tree,
                                 qpn & (hr_dev->caps.num_qps - 1));
}
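/*
 * hns_roce_buf_offset() below resolves an offset into a buffer that may
 * not be virtually contiguous: on 64-bit kernels, or when the buffer was
 * allocated as a single chunk, the offset applies directly to direct.buf;
 * otherwise it is split into a page index (offset >> PAGE_SHIFT) and an
 * intra-page offset (offset & (PAGE_SIZE - 1)) into page_list.
 */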
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
        if (BITS_PER_LONG == 64 || buf->nbufs == 1)
                return (char *)(buf->direct.buf) + offset;
        else
                return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
                       (offset & (PAGE_SIZE - 1));
}

int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
                        u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
                      struct hns_roce_mtt *mtt);
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
                          struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
                           struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
                         u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
                                int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
                                unsigned long obj, int cnt);
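/*
 * Typical alloc/free pairing (sketch only; assumes the usual
 * 0-on-success convention for hns_roce_bitmap_alloc):
 *
 *	unsigned long pdn;
 *
 *	if (hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pdn))
 *		return -ENOMEM;
 *	...
 *	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
 */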

struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);

struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
                                struct ib_ucontext *context,
                                struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                   u64 virt_addr, int access_flags,
                                   struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
                       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                       struct hns_roce_buf *buf);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mtt *mtt, struct ib_umem *umem);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
                       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt);
__be32 send_ieth(struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
                                    const struct ib_cq_init_attr *attr,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);

extern struct hns_roce_hw hns_roce_hw_v1;

#endif /* _HNS_ROCE_DEVICE_H */