// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static const struct class hnae_class = {
	.name = "hnae",
};

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (unlikely(!cb->priv))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);

	cb->priv = NULL;
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};
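
/*
 * Illustrative sketch, not compiled here: a consumer may hand its own
 * struct hnae_buf_ops to hnae_get_handle() below instead of NULL (NULL
 * selects the default hnae_bops above).  The callback names are
 * hypothetical and must match the signatures of the defaults above.
 *
 *	static struct hnae_buf_ops my_bops = {
 *		.alloc_buffer	= my_alloc_buffer,
 *		.free_buffer	= my_free_buffer,
 *		.map_buffer	= my_map_buffer,
 *		.unmap_buffer	= my_unmap_buffer,
 *	};
 *
 *	handle = hnae_get_handle(owner_dev, fwnode, port_id, &my_bops);
 */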
static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
	return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(&hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for the raw packet buffers of a ring and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	if (is_rx_ring(ring))
		hnae_free_buffers(ring);

	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init ring, and with buffer for rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	ring->coal_param = q->handle->coal_param;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter whether it is a tx or rx ring, ntu and ntc start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}

/*
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
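
/*
 * Illustrative sketch, not compiled here: a framework user can hook the
 * ae_chain above to learn when a new AE engine registers.  The callback
 * and notifier_block names are hypothetical.
 *
 *	static int my_ae_event(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		if (action == HNAE_AE_REGISTER)
 *			pr_debug("new AE engine registered\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_ae_nb = {
 *		.notifier_call = my_ae_event,
 *	};
 *
 *	hnae_register_notifier(&my_ae_nb);
 *	...
 *	hnae_unregister_notifier(&my_ae_nb);
 */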
/* hnae_reinit_handle - tear down and rebuild all queues of a handle */
int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free ring */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit ring */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode of the AE device providing the handle
 * @port_id: the id of the port this handle is bound to
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

/* hnae_put_handle - release a handle obtained from hnae_get_handle */
void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);
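
/*
 * Illustrative sketch, not compiled here: a net driver resolves its AE by
 * fwnode, takes a handle for one port, and releases it again on teardown.
 * The variable names (pdev, ae_fwnode, port_id) are hypothetical.
 *
 *	handle = hnae_get_handle(&pdev->dev, ae_fwnode, port_id, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	hnae_put_handle(handle);
 */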
static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this dev
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = &hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret) {
		put_device(&hdev->cls_dev);
		return ret;
	}

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"has no notifier for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregister a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	return class_register(&hnae_class);
}

static void __exit hnae_exit(void)
{
	class_unregister(&hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */