// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
#include <net/mana/mana_auxiliary.h>
#include <net/addrconf.h>

MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("NET_MANA");

/*
 * Verb dispatch table shared by both device flavors (RNIC and ETH).
 * Entries are keyed alphabetically after the identity fields; object
 * sizes let the RDMA core co-allocate driver structs with core objects.
 */
static const struct ib_device_ops mana_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MANA,
	.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,

	.add_gid = mana_ib_gd_add_gid,
	.alloc_pd = mana_ib_alloc_pd,
	.alloc_ucontext = mana_ib_alloc_ucontext,
	.create_ah = mana_ib_create_ah,
	.create_cq = mana_ib_create_cq,
	.create_qp = mana_ib_create_qp,
	.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
	.create_wq = mana_ib_create_wq,
	.dealloc_pd = mana_ib_dealloc_pd,
	.dealloc_ucontext = mana_ib_dealloc_ucontext,
	.del_gid = mana_ib_gd_del_gid,
	.dereg_mr = mana_ib_dereg_mr,
	.destroy_ah = mana_ib_destroy_ah,
	.destroy_cq = mana_ib_destroy_cq,
	.destroy_qp = mana_ib_destroy_qp,
	.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
	.destroy_wq = mana_ib_destroy_wq,
	.disassociate_ucontext = mana_ib_disassociate_ucontext,
	.get_dma_mr = mana_ib_get_dma_mr,
	.get_link_layer = mana_ib_get_link_layer,
	.get_port_immutable = mana_ib_get_port_immutable,
	.mmap = mana_ib_mmap,
	.modify_qp = mana_ib_modify_qp,
	.modify_wq = mana_ib_modify_wq,
	.poll_cq = mana_ib_poll_cq,
	.post_recv = mana_ib_post_recv,
	.post_send = mana_ib_post_send,
	.query_device = mana_ib_query_device,
	.query_gid = mana_ib_query_gid,
	.query_pkey = mana_ib_query_pkey,
	.query_port = mana_ib_query_port,
	.reg_user_mr = mana_ib_reg_user_mr,
	.reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
	.req_notify_cq = mana_ib_arm_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mana_ib_rwq_ind_table,
			   ib_ind_table),
};

/* Per-port HW counter ops, installed only after the caps query succeeds. */
static const struct ib_device_ops mana_ib_stats_ops = {
	.alloc_hw_port_stats = mana_ib_alloc_hw_port_stats,
	.get_hw_stats = mana_ib_get_hw_stats,
};

/*
 * Device-wide counter ops, installed only when the adapter advertises
 * MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT (see probe below).
 */
static const struct ib_device_ops mana_ib_device_stats_ops = {
	.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
};

/*
 * Netdevice notifier callback: keeps the IB device's port-1 netdev
 * binding in sync when the parent Ethernet device's upper device
 * changes (e.g. bonding membership changes).
 *
 * Only NETDEV_CHANGEUPPER events for our own parent netdev (port 0 of
 * the mana context) are handled; everything else returns NOTIFY_DONE.
 */
static int mana_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct mana_ib_dev *dev = container_of(this, struct mana_ib_dev, nb);
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct gdma_context *gc = dev->gdma_dev->gdma_context;
	struct mana_context *mc = gc->mana.driver_data;
	struct net_device *ndev;

	/* Only process events from our parent device */
	if (event_dev != mc->ports[0])
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
		/*
		 * RDMA core will setup GID based on updated netdev.
		 * It's not possible to race with the core as rtnl lock is being
		 * held.
		 */
		/*
		 * NOTE(review): if ndev is NULL here, ib_device_set_netdev()
		 * clears the binding and netdev_put() is a no-op — presumably
		 * the intended behavior when no primary netdev exists; verify.
		 */
		ib_device_set_netdev(&dev->ib_dev, ndev, 1);

		/* mana_get_primary_netdev() returns ndev with refcount held */
		netdev_put(ndev, &dev->dev_tracker);

		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

/*
 * Auxiliary-bus probe. Two flavors share this entry point:
 *  - RNIC ("mana.rdma"): one IB port bound to the primary netdev; sets up
 *    netdev notifier, EQs, the RNIC adapter object and its MAC.
 *  - ETH ("mana.eth"): one IB port per mana port, ETH caps query only.
 * Error paths unwind in reverse order; the RNIC-only labels are guarded
 * by mana_ib_is_rnic() so the ETH path can reuse the same ladder.
 */
static int mana_ib_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
	struct gdma_context *gc = madev->mdev->gdma_context;
	struct mana_context *mc = gc->mana.driver_data;
	struct gdma_dev *mdev = madev->mdev;
	struct net_device *ndev;
	struct mana_ib_dev *dev;
	u8 mac_addr[ETH_ALEN];
	int ret;

	dev = ib_alloc_device(mana_ib_dev, ib_dev);
	if (!dev)
		return -ENOMEM;

	ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.num_comp_vectors = gc->max_num_queues;
	dev->ib_dev.dev.parent = gc->dev;
	dev->gdma_dev = mdev;
	/* QP lookups can happen from IRQ context, hence XA_FLAGS_LOCK_IRQ */
	xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);

	if (mana_ib_is_rnic(dev)) {
		dev->ib_dev.phys_port_cnt = 1;
		ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
		if (!ndev) {
			ret = -ENODEV;
			ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
			goto free_ib_device;
		}
		/* Keep a copy: the MAC is programmed into the RNIC below,
		 * after the netdev reference has already been dropped.
		 */
		ether_addr_copy(mac_addr, ndev->dev_addr);
		/* Derive the node GUID (EUI-64) from the port MAC address */
		addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
		ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
		/* mana_get_primary_netdev() returns ndev with refcount held */
		netdev_put(ndev, &dev->dev_tracker);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
			goto free_ib_device;
		}

		/* Track upper-device changes to keep the binding current */
		dev->nb.notifier_call = mana_ib_netdev_event;
		ret = register_netdevice_notifier(&dev->nb);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
				  ret);
			goto free_ib_device;
		}

		ret = mana_ib_gd_query_adapter_caps(dev);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
			goto deregister_net_notifier;
		}

		ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
		if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
			ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);

		ret = mana_ib_create_eqs(dev);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
			goto deregister_net_notifier;
		}

		ret = mana_ib_gd_create_rnic_adapter(dev);
		if (ret)
			goto destroy_eqs;

		ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
			goto destroy_rnic;
		}
	} else {
		dev->ib_dev.phys_port_cnt = mc->num_ports;
		ret = mana_eth_query_adapter_caps(dev);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
			goto free_ib_device;
		}
	}

	/* DMA pool for address-handle buffers handed to the HW */
	dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
				       MANA_AV_BUFFER_SIZE, 0);
	if (!dev->av_pool) {
		ret = -ENOMEM;
		/* destroy_rnic label is is_rnic()-guarded, safe for ETH path */
		goto destroy_rnic;
	}

	ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
		  mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);

	ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ?
				 "mana_%d" : "manae_%d",
				 gc->dev);
	if (ret)
		goto deallocate_pool;

	dev_set_drvdata(&adev->dev, dev);

	return 0;

deallocate_pool:
	dma_pool_destroy(dev->av_pool);
destroy_rnic:
	if (mana_ib_is_rnic(dev))
		mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
	if (mana_ib_is_rnic(dev))
		mana_ib_destroy_eqs(dev);
deregister_net_notifier:
	if (mana_ib_is_rnic(dev))
		unregister_netdevice_notifier(&dev->nb);
free_ib_device:
	xa_destroy(&dev->qp_table_wq);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

/*
 * Auxiliary-bus remove: tear down in exact reverse of probe. RNIC-only
 * resources (adapter object, EQs, netdev notifier) are skipped for the
 * ETH flavor.
 */
static void mana_ib_remove(struct auxiliary_device *adev)
{
	struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);

	ib_unregister_device(&dev->ib_dev);
	dma_pool_destroy(dev->av_pool);
	if (mana_ib_is_rnic(dev)) {
		mana_ib_gd_destroy_rnic_adapter(dev);
		mana_ib_destroy_eqs(dev);
		unregister_netdevice_notifier(&dev->nb);
	}
	xa_destroy(&dev->qp_table_wq);
	ib_dealloc_device(&dev->ib_dev);
}

/* Match both flavors exposed by the mana Ethernet driver. */
static const struct auxiliary_device_id mana_id_table[] = {
	{ .name = "mana.rdma", },
	{ .name = "mana.eth", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mana_id_table);

static struct auxiliary_driver mana_driver = {
	.probe = mana_ib_probe,
	.remove = mana_ib_remove,
	.id_table = mana_id_table,
};

module_auxiliary_driver(mana_driver);