/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __QLNXR_CM_H__
#define __QLNXR_CM_H__

/* ECORE LL2 has a limit to the number of buffers it can handle.
 * FYI, OFED used 512 and 128 for recv and send.
 */
#define QLNXR_GSI_MAX_RECV_WR	(4096)
#define QLNXR_GSI_MAX_SEND_WR	(4096)

#define QLNXR_GSI_MAX_RECV_SGE	(1)	/* LL2 FW limitation */

/* future OFED/kernel will have these */
#define ETH_P_ROCE		(0x8915)
#define QLNXR_ROCE_V2_UDP_SPORT	(0000)

#define rdma_wr(_wr)		rdma_wr(_wr)
#define ud_wr(_wr)		ud_wr(_wr)
#define atomic_wr(_wr)		atomic_wr(_wr)

/* An IPv4-mapped GID carries the IPv4 address in its last four bytes. */
static inline u32 qlnxr_get_ipv4_from_gid(u8 *gid)
{
	return *(u32 *)(void *)&gid[12];
}

struct ecore_roce_ll2_header {
	void		*vaddr;
	dma_addr_t	baddr;
	size_t		len;
};

struct ecore_roce_ll2_buffer {
	dma_addr_t	baddr;
	size_t		len;
};

struct ecore_roce_ll2_packet {
	struct ecore_roce_ll2_header	header;
	int				n_seg;
	struct ecore_roce_ll2_buffer	payload[RDMA_MAX_SGE_PER_SQ_WQE];
	int				roce_mode;
	enum ecore_roce_ll2_tx_dest	tx_dest;
};

/* RDMA CM */

extern int qlnxr_gsi_poll_cq(struct ib_cq *ibcq,
		int num_entries,
		struct ib_wc *wc);

extern int qlnxr_gsi_post_recv(struct ib_qp *ibqp,
		const struct ib_recv_wr *wr,
		const struct ib_recv_wr **bad_wr);

extern int qlnxr_gsi_post_send(struct ib_qp *ibqp,
		const struct ib_send_wr *wr,
		const struct ib_send_wr **bad_wr);

extern struct ib_qp *qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
		struct ib_qp_init_attr *attrs,
		struct qlnxr_qp *qp);

extern void qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev,
		struct qlnxr_qp *qp,
		struct ib_qp_init_attr *attrs);

extern void qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info);

extern int qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev);

#endif /* #ifndef __QLNXR_CM_H__ */