/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING	0
#define TX_IRQ_NO_COALESC	0
#define TX_IRQ_NO_LLI_TIMER	0
#define TX_IRQ_NO_CREDIT	0
#define TX_IRQ_NO_RESEND_TIMER	0

#define CI_UPDATE_NO_PENDING	0
#define CI_UPDATE_NO_COALESC	0

#define HW_CONS_IDX(sq)		be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN		64

/**
 * hinic_txq_clean_stats - Clean the statistics of a specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts = 0;
	txq_stats->bytes = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts = txq_stats->pkts;
		stats->bytes = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * txq_stats_init - Initialize the statistics of a specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - dma map the skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct skb_frag_struct *frag;
	dma_addr_t dma_addr;
	int i, j;

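	/* Map the linear part of the skb first; the page fragments are
	 * mapped below into sges[1..nr_frags].
	 */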
	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;
	u16 prod_idx;

	txq = &nic_dev->txqs[skb->queue_mapping];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

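	/* One sge for the linear data plus one for each page fragment */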
	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case where free_tx_poll is running on another
		 * cpu and we stopped the subqueue after its check for free
		 * wqebbs.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
	if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);
	return err;
}

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in the tx queue that is connected to napi
 * @napi: napi
 * @budget: max number of tx wqes to free
 *
 * Return number of freed packets, at most @budget
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

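	/* If the subqueue was stopped and enough wqebbs are now free, wake it
	 * up again under the tx queue lock.
	 */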
	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());

		netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		enable_irq(sq->irq);
		return pkts;
	}

	return budget;
}

static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	/* Disable the interrupt until napi is complete */
	disable_irq_nosync(txq->sq->irq);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	int err;

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

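	/* Set the address that HW updates with the SQ consumer index */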
	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}