/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

/* The single registered qedr driver and the list of qede devices are
 * protected by qedr_dev_list_lock.
 */
static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

/* Caller must hold qedr_dev_list_lock */
static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
}

int qede_rdma_dev_add(struct qede_dev *edev)
{
	int rc = 0;

	if (qede_rdma_supported(edev)) {
		rc = qede_rdma_create_wq(edev);
		if (rc)
			return rc;

		INIT_LIST_HEAD(&edev->rdma_info.entry);
		mutex_lock(&qedr_dev_list_lock);
		list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
		_qede_rdma_dev_add(edev);
		mutex_unlock(&qedr_dev_list_lock);
	}

	return rc;
}

/* Caller must hold qedr_dev_list_lock */
static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
	edev->rdma_info.qedr_dev = NULL;
}

void qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	qede_rdma_destroy_wq(edev);
	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_remove(edev);
	list_del(&edev->rdma_info.entry);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	/* Attach the newly registered driver to every qede device already
	 * probed, and notify it about interfaces that are currently up.
	 */
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		if (edev->rdma_info.qedr_dev)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

/* Reuse an event node whose previous work has completed; otherwise allocate
 * a new one with GFP_ATOMIC and add it to the per-device event list.
 */
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

/* Work handler: dispatch a queued event to the corresponding notification
 * path of the qedr driver.
 */
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

/* Queue an event for the qedr driver on the per-device workqueue */
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	if (!edev->rdma_info.qedr_dev)
		return;

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		return;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);
}

void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}