/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD; we just maintain statistics.
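 *
 * CPLs that arrive wrapped in a CPL_FW4_MSG of type FW_TYPE_RSSCPL are
 * unwrapped before being handed to the ULD, and queues that have a flush
 * handler are delivered through the ULD's lro_rx_handler instead of its
 * plain rx_handler.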
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			if (bmap_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & CXGB4_USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & CXGB4_USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct uld_msix_info *minfo;
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		minfo = &adap->msix_info_ulds[bmap_idx];
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		minfo = &adap->msix_info_ulds[bmap_idx];
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct uld_msix_info *minfo;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		minfo = &adap->msix_info_ulds[bmap_idx];

		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct adapter *adap;
	int ret = 0;

	if (type >= CXGB4_ULD_MAX)
		return;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & CXGB4_USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add)
			goto free_irq;
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		ret = uld_attach(adap, type);
		if (ret)
			goto free_txq;
		continue;
free_txq:
		release_sge_txq_uld(adap, type);
free_irq:
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
free_rxq:
		free_sge_queues_uld(adap, type);
free_queues:
		free_queues_uld(adap, type);
out:
		dev_warn(adap->pdev_dev,
			 "ULD registration failed for uld type %d\n", type);
	}
	mutex_unlock(&uld_mutex);
	return;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
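 * The ULD is detached from every adapter it was bound to; its transmit
 * queues, MSI-X interrupts and receive queues are released in the process.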
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
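
/* Usage sketch (illustrative only, not part of this driver): an upper-layer
 * driver such as the RDMA or iSCSI ULD fills in a cxgb4_uld_info with at
 * least an add() callback, which receives the cxgb4_lld_info assembled by
 * uld_init()/uld_queue_init() above, plus rx_handler() and state_change()
 * callbacks.  The my_uld_* names and the queue counts below are hypothetical;
 * only the cxgb4_uld_info fields and the register/unregister calls come from
 * this file and cxgb4_uld.h.
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name		= "my_uld",
 *		.nrxq		= MAX_ULD_QSETS,
 *		.ntxq		= MAX_ULD_QSETS,
 *		.rxq_size	= 1024,
 *		.lro		= false,
 *		.add		= my_uld_add,
 *		.rx_handler	= my_uld_rx_handler,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 */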