/*-
 * Copyright (c) 2011-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/tlb.h>

#include "qman.h"
#include "portals.h"

extern struct dpaa_portals_softc *qp_sc;
static struct qman_softc *qman_sc;

extern t_Handle qman_portal_setup(struct qman_softc *qsc);

static void
qman_exception(t_Handle app, e_QmExceptions exception)
{
	struct qman_softc *sc;
	const char *message;

	sc = app;

	switch (exception) {
	case e_QM_EX_CORENET_INITIATOR_DATA:
		message = "Initiator Data Error";
		break;
	case e_QM_EX_CORENET_TARGET_DATA:
		message = "CoreNet Target Data Error";
		break;
	case e_QM_EX_CORENET_INVALID_TARGET_TRANSACTION:
		message = "Invalid Target Transaction";
		break;
	case e_QM_EX_PFDR_THRESHOLD:
		message = "PFDR Low Watermark Interrupt";
		break;
	case e_QM_EX_PFDR_ENQUEUE_BLOCKED:
		message = "PFDR Enqueues Blocked Interrupt";
		break;
	case e_QM_EX_SINGLE_ECC:
		message = "Single Bit ECC Error Interrupt";
		break;
	case e_QM_EX_MULTI_ECC:
		message = "Multi Bit ECC Error Interrupt";
		break;
	case e_QM_EX_INVALID_COMMAND:
		message = "Invalid Command Verb Interrupt";
		break;
	case e_QM_EX_DEQUEUE_DCP:
		message = "Invalid Dequeue Direct Connect Portal Interrupt";
		break;
	case e_QM_EX_DEQUEUE_FQ:
		message = "Invalid Dequeue FQ Interrupt";
		break;
	case e_QM_EX_DEQUEUE_SOURCE:
		message = "Invalid Dequeue Source Interrupt";
		break;
	case e_QM_EX_DEQUEUE_QUEUE:
		message = "Invalid Dequeue Queue Interrupt";
		break;
	case e_QM_EX_ENQUEUE_OVERFLOW:
		message = "Invalid Enqueue Overflow Interrupt";
		break;
	case e_QM_EX_ENQUEUE_STATE:
		message = "Invalid Enqueue State Interrupt";
		break;
	case e_QM_EX_ENQUEUE_CHANNEL:
		message = "Invalid Enqueue Channel Interrupt";
		break;
	case e_QM_EX_ENQUEUE_QUEUE:
		message = "Invalid Enqueue Queue Interrupt";
		break;
	case e_QM_EX_CG_STATE_CHANGE:
		message = "CG change state notification";
		break;
	default:
		message = "Unknown error";
	}

	device_printf(sc->sc_dev, "QMan Exception: %s.\n", message);
}

/**
 * General received frame callback.
 * This is called when the user did not register their own callback for a
 * given frame queue range (fqr).
 */
e_RxStoreResponse
qman_received_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for received frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

/**
 * General rejected frame callback.
 * This is called when the user did not register their own callback for a
 * given frame queue range (fqr).
 */
e_RxStoreResponse
qman_rejected_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame,
    t_QmRejectedFrameInfo *qm_rejected_frame_info)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for rejected frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

int
qman_attach(device_t dev)
{
	struct qman_softc *sc;
	t_QmParam qp;
	t_Error error;
	t_QmRevisionInfo rev;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	qman_sc = sc;

	if (XX_MallocSmartInit() != E_OK) {
		device_printf(dev, "could not initialize smart allocator.\n");
		return (ENXIO);
	}

	sched_pin();

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, QMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		goto err;
	}

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL) {
		device_printf(dev, "could not allocate error interrupt.\n");
		goto err;
	}

	if (qp_sc == NULL)
		goto err;

	dpaa_portal_map_registers(qp_sc);

	/* Initialize QMan */
	qp.guestId = NCSW_MASTER_ID;
	qp.baseAddress = rman_get_bushandle(sc->sc_rres);
	qp.swPortalsBaseAddress = rman_get_bushandle(qp_sc->sc_rres[0]);
	qp.liodn = 0;
	qp.totalNumOfFqids = QMAN_MAX_FQIDS;
	qp.fqdMemPartitionId = NCSW_MASTER_ID;
	qp.pfdrMemPartitionId = NCSW_MASTER_ID;
	qp.f_Exception = qman_exception;
	qp.h_App = sc;
	qp.errIrq = (uintptr_t)sc->sc_ires;
	qp.partFqidBase = QMAN_FQID_BASE;
	qp.partNumOfFqids = QMAN_MAX_FQIDS;
	qp.partCgsBase = 0;
	qp.partNumOfCgs = 0;

	sc->sc_qh = QM_Config(&qp);
	if (sc->sc_qh == NULL) {
		device_printf(dev, "could not be configured\n");
		goto err;
	}

	error = QM_Init(sc->sc_qh);
	if (error != E_OK) {
		device_printf(dev, "could not be initialized\n");
		goto err;
	}

	error = QM_GetRevision(sc->sc_qh, &rev);
	if (error != E_OK) {
		device_printf(dev, "could not get QMan revision\n");
		goto err;
	}

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	sched_unpin();

	qman_portal_setup(sc);

	return (0);

err:
	sched_unpin();
	qman_detach(dev);
	return (ENXIO);
}

int
qman_detach(device_t dev)
{
	struct qman_softc *sc;

	sc = device_get_softc(dev);

	if (sc->sc_qh)
		QM_Free(sc->sc_qh);

	if (sc->sc_ires != NULL)
		XX_DeallocIntr((uintptr_t)sc->sc_ires);

	if (sc->sc_ires != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_irid, sc->sc_ires);

	if (sc->sc_rres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    sc->sc_rrid, sc->sc_rres);

	return (0);
}

int
qman_suspend(device_t dev)
{

	return (0);
}

int
qman_resume(device_t dev)
{

	return (0);
}

int
qman_shutdown(device_t dev)
{

	return (0);
}


/**
 * @group QMan API functions implementation.
 * @{
 */

t_Handle
qman_fqr_create(uint32_t fqids_num, e_QmFQChannel channel, uint8_t wq,
    bool force_fqid, uint32_t fqid_or_align, bool init_parked,
    bool hold_active, bool prefer_in_cache, bool congst_avoid_ena,
    t_Handle congst_group, int8_t overhead_accounting_len,
    uint32_t tail_drop_threshold)
{
	struct qman_softc *sc;
	t_QmFqrParams fqr;
	unsigned int cpu;
	t_Handle fqrh, portal;

	sc = qman_sc;

	sched_pin();
	cpu = PCPU_GET(cpuid);

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		goto err;
	}

	fqr.h_Qm = sc->sc_qh;
	fqr.h_QmPortal = portal;
	fqr.initParked = init_parked;
	fqr.holdActive = hold_active;
	fqr.preferInCache = prefer_in_cache;

	/* We do not support stashing */
	fqr.useContextAForStash = FALSE;
	fqr.p_ContextA = 0;
	fqr.p_ContextB = 0;

	fqr.channel = channel;
	fqr.wq = wq;
	fqr.shadowMode = FALSE;
	fqr.numOfFqids = fqids_num;

	/* FQID */
	fqr.useForce = force_fqid;
	if (force_fqid) {
		fqr.qs.frcQ.fqid = fqid_or_align;
	} else {
		fqr.qs.nonFrcQs.align = fqid_or_align;
	}

	/* Congestion Avoidance */
	fqr.congestionAvoidanceEnable = congst_avoid_ena;
	if (congst_avoid_ena) {
		fqr.congestionAvoidanceParams.h_QmCg = congst_group;
		fqr.congestionAvoidanceParams.overheadAccountingLength =
		    overhead_accounting_len;
		fqr.congestionAvoidanceParams.fqTailDropThreshold =
		    tail_drop_threshold;
	} else {
		fqr.congestionAvoidanceParams.h_QmCg = 0;
		fqr.congestionAvoidanceParams.overheadAccountingLength = 0;
		fqr.congestionAvoidanceParams.fqTailDropThreshold = 0;
	}

	fqrh = QM_FQR_Create(&fqr);
	if (fqrh == NULL) {
		device_printf(sc->sc_dev, "could not create Frame Queue Range\n");
		goto err;
	}

	sc->sc_fqr_cpu[QM_FQR_GetFqid(fqrh)] = PCPU_GET(cpuid);

	sched_unpin();

	return (fqrh);

err:
	sched_unpin();

	return (NULL);
}

t_Error
qman_fqr_free(t_Handle fqr)
{
	struct qman_softc *sc;
	t_Error error;

	sc = qman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)]);
	thread_unlock(curthread);

	error = QM_FQR_Free(fqr);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

t_Error
qman_fqr_register_cb(t_Handle fqr, t_QmReceivedFrameCallback *callback,
    t_Handle app)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_RegisterCB(fqr, callback, app);

	sched_unpin();

	return (error);
}

t_Error
qman_fqr_enqueue(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_Enqueue(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_counter(t_Handle fqr, uint32_t fqid_off,
    e_QmFqrCounters counter)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetCounter(fqr, portal, fqid_off, counter);

	sched_unpin();

	return (val);
}

t_Error
qman_fqr_pull_frame(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_PullFrame(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_base_fqid(t_Handle fqr)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetFqid(fqr);

	sched_unpin();

	return (val);
}

t_Error
qman_poll(e_QmPortalPollSource source)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_Poll(sc->sc_qh, source);

	sched_unpin();

	return (error);
}

/*
 * TODO: add polling and/or congestion support.
 */

/** @} */
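
/*
 * Illustrative sketch (not part of the driver, never compiled): one way a
 * DPAA consumer might use the API above to create a frame queue range,
 * register a receive callback and enqueue a frame.  The function names
 * example_received_frame()/example_fqr_usage(), the pool channel, the work
 * queue number and the frame descriptor setup are assumptions chosen for the
 * example only; a real consumer picks values matching its FMan configuration.
 */
#if 0
static e_RxStoreResponse
example_received_frame(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{

	/* Consume the frame descriptor here. */
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

static void
example_fqr_usage(void)
{
	t_Handle fqr;
	t_DpaaFD fd;

	/*
	 * One dynamically allocated (non-forced, alignment 1) FQID on an
	 * assumed pool channel and work queue, not parked, preferred in
	 * cache, with congestion avoidance disabled.
	 */
	fqr = qman_fqr_create(1, e_QM_FQ_CHANNEL_POOL1, 1, FALSE, 1,
	    FALSE, FALSE, TRUE, FALSE, NULL, 0, 0);
	if (fqr == NULL)
		return;

	/* Replace the dummy qman_received_frame_callback() for this FQR. */
	qman_fqr_register_cb(fqr, example_received_frame, NULL);

	/* Fill in the frame descriptor before handing it to QMan. */
	memset(&fd, 0, sizeof(fd));
	qman_fqr_enqueue(fqr, 0, &fd);

	qman_fqr_free(fqr);
}
#endif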