/*-
 * Copyright (c) 2011-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/tlb.h>

#include "qman.h"
#include "portals.h"

extern struct dpaa_portals_softc *qp_sc;
static struct qman_softc *qman_sc;

extern t_Handle qman_portal_setup(struct qman_softc *qsc);

static void
qman_exception(t_Handle app, e_QmExceptions exception)
{
	struct qman_softc *sc;
	const char *message;

	sc = app;

	switch (exception) {
	case e_QM_EX_CORENET_INITIATOR_DATA:
		message = "Initiator Data Error";
		break;
	case e_QM_EX_CORENET_TARGET_DATA:
		message = "CoreNet Target Data Error";
		break;
	case e_QM_EX_CORENET_INVALID_TARGET_TRANSACTION:
		message = "Invalid Target Transaction";
		break;
	case e_QM_EX_PFDR_THRESHOLD:
		message = "PFDR Low Watermark Interrupt";
		break;
	case e_QM_EX_PFDR_ENQUEUE_BLOCKED:
		message = "PFDR Enqueues Blocked Interrupt";
		break;
	case e_QM_EX_SINGLE_ECC:
		message = "Single Bit ECC Error Interrupt";
		break;
	case e_QM_EX_MULTI_ECC:
		message = "Multi Bit ECC Error Interrupt";
		break;
	case e_QM_EX_INVALID_COMMAND:
		message = "Invalid Command Verb Interrupt";
		break;
	case e_QM_EX_DEQUEUE_DCP:
		message = "Invalid Dequeue Direct Connect Portal Interrupt";
		break;
	case e_QM_EX_DEQUEUE_FQ:
		message = "Invalid Dequeue FQ Interrupt";
		break;
	case e_QM_EX_DEQUEUE_SOURCE:
		message = "Invalid Dequeue Source Interrupt";
		break;
	case e_QM_EX_DEQUEUE_QUEUE:
		message = "Invalid Dequeue Queue Interrupt";
		break;
	case e_QM_EX_ENQUEUE_OVERFLOW:
		message = "Invalid Enqueue Overflow Interrupt";
		break;
	case e_QM_EX_ENQUEUE_STATE:
		message = "Invalid Enqueue State Interrupt";
		break;
	case e_QM_EX_ENQUEUE_CHANNEL:
		message = "Invalid Enqueue Channel Interrupt";
		break;
	case e_QM_EX_ENQUEUE_QUEUE:
		message = "Invalid Enqueue Queue Interrupt";
		break;
	case e_QM_EX_CG_STATE_CHANGE:
		message = "CG change state notification";
		break;
	default:
		message = "Unknown error";
	}

	device_printf(sc->sc_dev, "QMan Exception: %s.\n", message);
}

/**
 * General received frame callback.
 * This is called when the user did not register their own callback for a
 * given frame queue range (FQR).
 */
e_RxStoreResponse
qman_received_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for received frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

/**
 * General rejected frame callback.
 * This is called when the user did not register their own callback for a
 * given frame queue range (FQR).
 */
e_RxStoreResponse
qman_rejected_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame,
    t_QmRejectedFrameInfo *qm_rejected_frame_info)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for rejected frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

int
qman_attach(device_t dev)
{
	struct qman_softc *sc;
	t_QmParam qp;
	t_Error error;
	t_QmRevisionInfo rev;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	qman_sc = sc;

	if (XX_MallocSmartInit() != E_OK) {
		device_printf(dev, "could not initialize smart allocator.\n");
		return (ENXIO);
	}

	sched_pin();

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, QMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		goto err;
	}

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL) {
		device_printf(dev, "could not allocate error interrupt.\n");
		goto err;
	}

	if (qp_sc == NULL)
		goto err;

	dpaa_portal_map_registers(qp_sc);

	/* Initialize QMan */
	qp.guestId = NCSW_MASTER_ID;
	qp.baseAddress = rman_get_bushandle(sc->sc_rres);
	qp.swPortalsBaseAddress = rman_get_bushandle(qp_sc->sc_rres[0]);
	qp.liodn = 0;
	qp.totalNumOfFqids = QMAN_MAX_FQIDS;
	qp.fqdMemPartitionId = NCSW_MASTER_ID;
	qp.pfdrMemPartitionId = NCSW_MASTER_ID;
	qp.f_Exception = qman_exception;
	qp.h_App = sc;
	qp.errIrq = (uintptr_t)sc->sc_ires;
	qp.partFqidBase = QMAN_FQID_BASE;
	qp.partNumOfFqids = QMAN_MAX_FQIDS;
	qp.partCgsBase = 0;
	qp.partNumOfCgs = 0;

	sc->sc_qh = QM_Config(&qp);
	if (sc->sc_qh == NULL) {
		device_printf(dev, "could not be configured\n");
		goto err;
	}

	error = QM_Init(sc->sc_qh);
	if (error != E_OK) {
		device_printf(dev, "could not be initialized\n");
		goto err;
	}

	error = QM_GetRevision(sc->sc_qh, &rev);
	if (error != E_OK) {
		device_printf(dev, "could not get QMan revision\n");
		goto err;
	}

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	sched_unpin();

	qman_portal_setup(sc);

	return (0);

err:
	sched_unpin();
	qman_detach(dev);
	return (ENXIO);
}
int
qman_detach(device_t dev)
{
	struct qman_softc *sc;

	sc = device_get_softc(dev);

	if (sc->sc_qh)
		QM_Free(sc->sc_qh);

	if (sc->sc_ires != NULL)
		XX_DeallocIntr((uintptr_t)sc->sc_ires);

	if (sc->sc_ires != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_irid, sc->sc_ires);

	if (sc->sc_rres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    sc->sc_rrid, sc->sc_rres);

	return (0);
}

int
qman_suspend(device_t dev)
{

	return (0);
}

int
qman_resume(device_t dev)
{

	return (0);
}

int
qman_shutdown(device_t dev)
{

	return (0);
}


/**
 * @group QMan API functions implementation.
 *
 * QMan software portals are per-CPU resources, so most functions below pin
 * the calling thread to its current CPU (sched_pin()) while they use the
 * portal returned by qman_portal_setup().  qman_fqr_free() instead binds to
 * the CPU on which the frame queue range was created.
 * @{
 */

t_Handle
qman_fqr_create(uint32_t fqids_num, e_QmFQChannel channel, uint8_t wq,
    bool force_fqid, uint32_t fqid_or_align, bool init_parked,
    bool hold_active, bool prefer_in_cache, bool congst_avoid_ena,
    t_Handle congst_group, int8_t overhead_accounting_len,
    uint32_t tail_drop_threshold)
{
	struct qman_softc *sc;
	t_QmFqrParams fqr;
	t_Handle fqrh, portal;

	sc = qman_sc;

	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		goto err;
	}

	fqr.h_Qm = sc->sc_qh;
	fqr.h_QmPortal = portal;
	fqr.initParked = init_parked;
	fqr.holdActive = hold_active;
	fqr.preferInCache = prefer_in_cache;

	/* We do not support stashing */
	fqr.useContextAForStash = FALSE;
	fqr.p_ContextA = 0;
	fqr.p_ContextB = 0;

	fqr.channel = channel;
	fqr.wq = wq;
	fqr.shadowMode = FALSE;
	fqr.numOfFqids = fqids_num;

	/* FQID */
	fqr.useForce = force_fqid;
	if (force_fqid) {
		fqr.qs.frcQ.fqid = fqid_or_align;
	} else {
		fqr.qs.nonFrcQs.align = fqid_or_align;
	}

	/* Congestion Avoidance */
	fqr.congestionAvoidanceEnable = congst_avoid_ena;
	if (congst_avoid_ena) {
		fqr.congestionAvoidanceParams.h_QmCg = congst_group;
		fqr.congestionAvoidanceParams.overheadAccountingLength =
		    overhead_accounting_len;
		fqr.congestionAvoidanceParams.fqTailDropThreshold =
		    tail_drop_threshold;
	} else {
		fqr.congestionAvoidanceParams.h_QmCg = 0;
		fqr.congestionAvoidanceParams.overheadAccountingLength = 0;
		fqr.congestionAvoidanceParams.fqTailDropThreshold = 0;
	}

	fqrh = QM_FQR_Create(&fqr);
	if (fqrh == NULL) {
		device_printf(sc->sc_dev,
		    "could not create Frame Queue Range\n");
		goto err;
	}

	sc->sc_fqr_cpu[QM_FQR_GetFqid(fqrh)] = PCPU_GET(cpuid);

	sched_unpin();

	return (fqrh);

err:
	sched_unpin();

	return (NULL);
}

t_Error
qman_fqr_free(t_Handle fqr)
{
	struct qman_softc *sc;
	t_Error error;

	sc = qman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)]);
	thread_unlock(curthread);

	error = QM_FQR_Free(fqr);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
t_Error
qman_fqr_register_cb(t_Handle fqr, t_QmReceivedFrameCallback *callback,
    t_Handle app)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_RegisterCB(fqr, callback, app);

	sched_unpin();

	return (error);
}

t_Error
qman_fqr_enqueue(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_Enqueue(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_counter(t_Handle fqr, uint32_t fqid_off,
    e_QmFqrCounters counter)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetCounter(fqr, portal, fqid_off, counter);

	sched_unpin();

	return (val);
}

t_Error
qman_fqr_pull_frame(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_PullFrame(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_base_fqid(t_Handle fqr)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetFqid(fqr);

	sched_unpin();

	return (val);
}

t_Error
qman_poll(e_QmPortalPollSource source)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Make sure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_Poll(sc->sc_qh, source);

	sched_unpin();

	return (error);
}

/*
 * TODO: add polling and/or congestion support.
 */

/** @} */
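
/*
 * Illustrative usage sketch (kept under #if 0, not compiled into the driver):
 * a minimal, hypothetical consumer that creates a frame queue range with
 * qman_fqr_create() and installs its own receive handler in place of the
 * default dummy callback above.  The names my_rx_cb()/my_driver_setup_rx()
 * and the parameter values are assumptions made for illustration only.
 */
#if 0
static e_RxStoreResponse
my_rx_cb(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{

	/* Process the received frame descriptor here. */
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

static int
my_driver_setup_rx(e_QmFQChannel channel, t_Handle app)
{
	t_Handle fqr;

	/*
	 * Create a range of 8 frame queues on the given channel, work queue 3,
	 * letting QMan choose the base FQID (no forced FQID, alignment of 1),
	 * not parked, cache-preferred, with congestion avoidance disabled.
	 */
	fqr = qman_fqr_create(8, channel, 3, FALSE, 1, FALSE, FALSE, TRUE,
	    FALSE, NULL, 0, 0);
	if (fqr == NULL)
		return (ENXIO);

	/* Replace the default dummy callback with our own handler. */
	if (qman_fqr_register_cb(fqr, my_rx_cb, app) != E_OK) {
		qman_fqr_free(fqr);
		return (ENXIO);
	}

	return (0);
}
#endif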