/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef FBSD_KCOMPAT_H
#define FBSD_KCOMPAT_H
#include "ice_rdma.h"

/*
 * Compat shims mapping the newer tasklet_setup()/from_tasklet() API onto
 * the legacy tasklet_init() interface, which takes an unsigned long data
 * argument.
 */
#define TASKLET_DATA_TYPE	unsigned long
#define TASKLET_FUNC_TYPE	void (*)(TASKLET_DATA_TYPE)

#ifndef tasklet_setup
#define tasklet_setup(tasklet, callback) \
	tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback), \
		     (TASKLET_DATA_TYPE)(tasklet))
#endif
#ifndef from_tasklet
#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
#endif

#if __FreeBSD_version >= 1400000
#define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
	(sizeof(struct drv_struct) + \
	 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
	 BUILD_BUG_ON_ZERO( \
		!__same_type(((struct drv_struct *)NULL)->member, \
			     struct ib_struct)))
#endif /* __FreeBSD_version >= 1400000 */

#define set_ibdev_dma_device(ibdev, dev) \
	ibdev.dma_device = (dev)
#define set_max_sge(props, rf) \
	((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
#define rdma_query_gid(ibdev, port, index, gid) \
	ib_get_cached_gid(ibdev, port, index, gid, NULL)
#define kmap(pg) page_address(pg)
#define kmap_local_page(pg) page_address(pg)
#define kunmap(pg)
#define kunmap_local(pg)

#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#if __FreeBSD_version < 1400026
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp)
#else
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
#endif
#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif

#define IRDMA_QOS_MODE_VLAN	0x0
#define IRDMA_QOS_MODE_DSCP	0x1

#define IRDMA_VER_LEN 24

void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);

struct irdma_tunable_info {
	struct sysctl_ctx_list irdma_sysctl_ctx;
	struct sysctl_oid *irdma_sysctl_tree;
	struct sysctl_oid *sws_sysctl_tree;
	char drv_ver[IRDMA_VER_LEN];
	u8 roce_ena;
};
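
/*
 * Usage sketch for the tasklet compat macros defined above (illustrative
 * only; the names below are hypothetical and not part of this driver):
 *
 *	struct my_ctx {
 *		struct tasklet_struct dpc_tasklet;
 *	};
 *
 *	static void my_dpc(struct tasklet_struct *t)
 *	{
 *		struct my_ctx *ctx = from_tasklet(ctx, t, dpc_tasklet);
 *		...
 *	}
 *
 *	tasklet_setup(&ctx->dpc_tasklet, my_dpc);
 *
 * tasklet_setup() passes the tasklet itself as the legacy "data" argument,
 * so a callback written against the new prototype can recover its
 * containing structure with from_tasklet().
 */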

static inline int irdma_iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				      u16 *pkey)
{
	*pkey = 0;
	return 0;
}

static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
	/* GEN1 does not support CQ create flags */
	if (hw_rev == IRDMA_GEN_1)
		return flags ? -EOPNOTSUPP : 0;

	return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
}

/* Return the slot for the next PBL entry, stepping to the next pble_info
 * chunk's address array once the current chunk is exhausted.
 */
static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if (!(*pinfo) || *idx != (*pinfo)->cnt)
		return ++pbl;
	*idx = 0;
	(*pinfo)++;

	return (*pinfo)->addr;
}

/* Resolve the vnet for an iWARP cm_id; fall back to init_net when no cm_id
 * is given.
 */
static inline struct vnet *
irdma_cmid_to_vnet(struct iw_cm_id *cm_id)
{
	struct rdma_cm_id *rdma_id;

	if (!cm_id)
		return &init_net;

	rdma_id = (struct rdma_cm_id *)cm_id->context;

	return rdma_id->route.addr.dev_addr.net;
}

#if __FreeBSD_version < 1400026
struct ib_cq *irdma_create_cq(struct ib_device *ibdev,
			      const struct ib_cq_init_attr *attr,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);
#else
int irdma_create_cq(struct ib_cq *ibcq,
		    const struct ib_cq_init_attr *attr,
		    struct ib_udata *udata);
#endif
struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);
#if __FreeBSD_version >= 1400026
int irdma_create_ah(struct ib_ah *ib_ah,
		    struct ib_ah_attr *attr, u32 flags,
		    struct ib_udata *udata);
int irdma_create_ah_stub(struct ib_ah *ib_ah,
			 struct ib_ah_attr *attr, u32 flags,
			 struct ib_udata *udata);
#else
struct ib_ah *irdma_create_ah(struct ib_pd *ibpd,
			      struct ib_ah_attr *attr,
			      struct ib_udata *udata);
struct ib_ah *irdma_create_ah_stub(struct ib_pd *ibpd,
				   struct ib_ah_attr *attr,
				   struct ib_udata *udata);
#endif
void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);

#if __FreeBSD_version >= 1400026
void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
#else
int irdma_destroy_ah(struct ib_ah *ibah);
int irdma_destroy_ah_stub(struct ib_ah *ibah);
#endif
#if __FreeBSD_version < 1400026
int irdma_destroy_qp(struct ib_qp *ibqp);
#else
int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
int irdma_dereg_mr(struct ib_mr *ib_mr);
#else
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
#endif
int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u8 *speed, u8 *width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
					  u8 port_num);
int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_immutable *immutable);
int irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			    struct ib_port_immutable *immutable);
int irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
		    union ib_gid *gid);
int irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
			 union ib_gid *gid);
int irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		     u16 *pkey);
int irdma_query_port(struct ib_device *ibdev, u8 port,
		     struct ib_port_attr *props);
struct rdma_hw_stats *irdma_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
int irdma_get_hw_stats(struct ib_device *ibdev,
		       struct rdma_hw_stats *stats, u8 port_num,
		       int index);
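
/*
 * Usage sketch for irdma_next_pbl_addr() above (illustrative only; 'pbl',
 * 'pinfo', 'idx', 'npages' and 'page_dma_addr' are hypothetical locals):
 *
 *	u32 idx = 0;
 *
 *	for (i = 0; i < npages; i++) {
 *		*pbl = page_dma_addr[i];
 *		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
 *	}
 *
 * Each call returns the slot for the next page address, moving to the first
 * entry of the next pble_info chunk once 'idx' reaches the current chunk's
 * 'cnt'.
 */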

void irdma_request_reset(struct irdma_pci_f *rf);
int irdma_register_qset(struct irdma_sc_vsi *vsi,
			struct irdma_ws_node *tc_node);
void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
			   struct irdma_ws_node *tc_node);
void ib_unregister_device(struct ib_device *ibdev);
#if __FreeBSD_version < 1400026
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot);
#endif
void irdma_disassociate_ucontext(struct ib_ucontext *context);
int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
			      struct ib_qp_attr *attr,
			      u16 *vlan_id);

void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);
u16 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn);

void irdma_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len);

int irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		      struct ib_port_modify *props);
int irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin,
		      u8 *dst_mac);
int irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
				struct irdma_cm_info *cm_info);
int irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node, u32 dst_ip,
			     int arpindex);
int irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, u32 *dest,
				  int arpindex);
void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
void irdma_sysctl_settings(struct irdma_pci_f *rf);
void irdma_sw_stats_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);

int irdma_hwdereg_mr(struct ib_mr *ib_mr);
int irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
			u64 virt, int new_access, struct ib_pd *new_pd,
			struct ib_udata *udata);
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
#if __FreeBSD_version < 1400026
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			     u32 max_num_sg);
#else
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			     u32 max_num_sg, struct ib_udata *udata);
#endif
int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
		   u16 access);
struct ib_mr *irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
				   u64 virt, struct ib_udata *udata);
int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			     struct ib_udata *udata);
int irdma_hw_alloc_stag(struct irdma_device *iwdev, struct irdma_mr *iwmr);
void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq);
int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
			    struct irdma_device *iwdev);
void irdma_setup_virt_qp(struct irdma_device *iwdev,
			 struct irdma_qp *iwqp,
			 struct irdma_qp_init_info *init_info);
int irdma_setup_kmode_qp(struct irdma_device *iwdev,
			 struct irdma_qp *iwqp,
			 struct irdma_qp_init_info *info,
			 struct ib_qp_init_attr *init_attr);
int irdma_setup_umode_qp(struct ib_udata *udata,
			 struct irdma_device *iwdev,
			 struct irdma_qp *iwqp,
			 struct irdma_qp_init_info *info,
			 struct ib_qp_init_attr *init_attr);
void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
					struct irdma_qp_host_ctx_info *ctx_info);
void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
				      struct irdma_qp_host_ctx_info *ctx_info);
int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp);
void irdma_dealloc_push_page(struct irdma_pci_f *rf,
			     struct irdma_sc_qp *qp);
int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
			      struct irdma_cq_buf *lcqe_buf);
#if __FreeBSD_version < 1400026
int irdma_destroy_cq(struct ib_cq *ib_cq);
#else
void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
struct ib_ucontext *irdma_alloc_ucontext(struct ib_device *, struct ib_udata *);
#else
int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
int irdma_dealloc_ucontext(struct ib_ucontext *);
#else
void irdma_dealloc_ucontext(struct ib_ucontext *context);
#endif
#if __FreeBSD_version < 1400026
struct ib_pd *irdma_alloc_pd(struct ib_device *, struct ib_ucontext *,
			     struct ib_udata *);
#else
int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
int irdma_dealloc_pd(struct ib_pd *);
#else
void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
#endif
int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *,
		  const struct ib_gid_attr *, void **);
int irdma_del_gid(struct ib_device *, u8, unsigned int, void **);
struct ib_device *ib_device_get_by_netdev(struct ifnet *ndev, int driver_id);
void ib_device_put(struct ib_device *device);
void ib_unregister_device_put(struct ib_device *device);
enum ib_mtu ib_mtu_int_to_enum(int mtu);
struct irdma_pbl *irdma_get_pbl(unsigned long va, struct list_head *pbl_list);
void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq);
void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp);

struct irdma_ucontext;
void irdma_del_memlist(struct irdma_mr *iwmr, struct irdma_ucontext *ucontext);
void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
			     enum irdma_pble_level level);
void irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf);
void irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf);

/* ib_umem_num_dma_blocks() was introduced in this series:
 * https://lore.kernel.org/linux-rdma/0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com/
 * This is an irdma version of the same helper for older code bases, with the
 * difference that the iova is passed in explicitly rather than derived from
 * umem->iova.
 */
static inline size_t irdma_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned long pgsz, u64 iova)
{
/* some older OFED distros do not have ALIGN_DOWN */
#ifndef ALIGN_DOWN
#define ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a))
#endif

	return (size_t)((ALIGN(iova + umem->length, pgsz) -
			 ALIGN_DOWN(iova, pgsz))) / pgsz;
}

/* Derive a modified EUI-64 interface identifier from a 48-bit MAC address. */
static inline void addrconf_addr_eui48(u8 *deui, const char *const addr)
{
	memcpy(deui, addr, 3);
	deui[3] = 0xFF;
	deui[4] = 0xFE;
	memcpy(deui + 5, addr + 3, 3);
	deui[0] ^= 2;
}

#endif /* FBSD_KCOMPAT_H */
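
/*
 * Worked example for irdma_ib_umem_num_dma_blocks() above (illustrative
 * only; the numbers are made up): with iova = 0x201800, umem->length =
 * 0x3000 and pgsz = 0x1000,
 *
 *	ALIGN(0x201800 + 0x3000, 0x1000) = 0x205000
 *	ALIGN_DOWN(0x201800, 0x1000)     = 0x201000
 *
 * so the helper returns (0x205000 - 0x201000) / 0x1000 = 4 blocks, one more
 * than length / pgsz because the unaligned buffer straddles four distinct
 * pages.
 */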