/*-
 * Copyright (c) 2011-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/tlb.h>

#include "qman.h"
#include "portals.h"

extern struct dpaa_portals_softc *qp_sc;
static struct qman_softc *qman_sc;

extern t_Handle qman_portal_setup(struct qman_softc *qsc);

static void
qman_exception(t_Handle app, e_QmExceptions exception)
{
	struct qman_softc *sc;
	const char *message;

	sc = app;

	switch (exception) {
	case e_QM_EX_CORENET_INITIATOR_DATA:
		message = "Initiator Data Error";
		break;
	case e_QM_EX_CORENET_TARGET_DATA:
		message = "CoreNet Target Data Error";
		break;
	case e_QM_EX_CORENET_INVALID_TARGET_TRANSACTION:
		message = "Invalid Target Transaction";
		break;
	case e_QM_EX_PFDR_THRESHOLD:
		message = "PFDR Low Watermark Interrupt";
		break;
	case e_QM_EX_PFDR_ENQUEUE_BLOCKED:
		message = "PFDR Enqueues Blocked Interrupt";
		break;
	case e_QM_EX_SINGLE_ECC:
		message = "Single Bit ECC Error Interrupt";
		break;
	case e_QM_EX_MULTI_ECC:
		message = "Multi Bit ECC Error Interrupt";
		break;
	case e_QM_EX_INVALID_COMMAND:
		message = "Invalid Command Verb Interrupt";
		break;
	case e_QM_EX_DEQUEUE_DCP:
		message = "Invalid Dequeue Direct Connect Portal Interrupt";
		break;
	case e_QM_EX_DEQUEUE_FQ:
		message = "Invalid Dequeue FQ Interrupt";
		break;
	case e_QM_EX_DEQUEUE_SOURCE:
		message = "Invalid Dequeue Source Interrupt";
		break;
	case e_QM_EX_DEQUEUE_QUEUE:
		message = "Invalid Dequeue Queue Interrupt";
		break;
	case e_QM_EX_ENQUEUE_OVERFLOW:
		message = "Invalid Enqueue Overflow Interrupt";
		break;
	case e_QM_EX_ENQUEUE_STATE:
		message = "Invalid Enqueue State Interrupt";
		break;
	case e_QM_EX_ENQUEUE_CHANNEL:
		message = "Invalid Enqueue Channel Interrupt";
		break;
	case e_QM_EX_ENQUEUE_QUEUE:
		message = "Invalid Enqueue Queue Interrupt";
		break;
	case e_QM_EX_CG_STATE_CHANGE:
		message = "CG change state notification";
		break;
	default:
		message = "Unknown error";
	}

	device_printf(sc->sc_dev, "QMan Exception: %s.\n", message);
}

/**
 * General received frame callback.
 * This is called when the user did not register their own callback for a
 * given frame queue range (FQR).
 */
e_RxStoreResponse
qman_received_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for received frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

/**
 * General rejected frame callback.
 * This is called when the user did not register their own callback for a
 * given frame queue range (FQR).
 */
e_RxStoreResponse
qman_rejected_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame,
    t_QmRejectedFrameInfo *qm_rejected_frame_info)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for rejected frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

int
qman_attach(device_t dev)
{
	struct qman_softc *sc;
	t_QmParam qp;
	t_Error error;
	t_QmRevisionInfo rev;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	qman_sc = sc;

	if (XX_MallocSmartInit() != E_OK) {
		device_printf(dev, "could not initialize smart allocator.\n");
		return (ENXIO);
	}

	sched_pin();

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, QMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		goto err;
	}

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL) {
		device_printf(dev, "could not allocate error interrupt.\n");
		goto err;
	}

	if (qp_sc == NULL)
		goto err;

	dpaa_portal_map_registers(qp_sc);

	/* Initialize QMan */
	qp.guestId = NCSW_MASTER_ID;
	qp.baseAddress = rman_get_bushandle(sc->sc_rres);
	qp.swPortalsBaseAddress = rman_get_bushandle(qp_sc->sc_rres[0]);
	qp.liodn = 0;
	qp.totalNumOfFqids = QMAN_MAX_FQIDS;
	qp.fqdMemPartitionId = NCSW_MASTER_ID;
	qp.pfdrMemPartitionId = NCSW_MASTER_ID;
	qp.f_Exception = qman_exception;
	qp.h_App = sc;
	qp.errIrq = (uintptr_t)sc->sc_ires;
	qp.partFqidBase = QMAN_FQID_BASE;
	qp.partNumOfFqids = QMAN_MAX_FQIDS;
	qp.partCgsBase = 0;
	qp.partNumOfCgs = 0;

	sc->sc_qh = QM_Config(&qp);
	if (sc->sc_qh == NULL) {
		device_printf(dev, "could not be configured\n");
		goto err;
	}

	error = QM_Init(sc->sc_qh);
	if (error != E_OK) {
		device_printf(dev, "could not be initialized\n");
		goto err;
	}

	error = QM_GetRevision(sc->sc_qh, &rev);
	if (error != E_OK) {
		device_printf(dev, "could not get QMan revision\n");
		goto err;
	}

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	sched_unpin();

	qman_portal_setup(sc);

	return (0);

err:
	sched_unpin();
	qman_detach(dev);

	return (ENXIO);
}

int
qman_detach(device_t dev)
{
	struct qman_softc *sc;

	sc = device_get_softc(dev);

	if (sc->sc_qh)
		QM_Free(sc->sc_qh);

	if (sc->sc_ires != NULL)
		XX_DeallocIntr((uintptr_t)sc->sc_ires);

	if (sc->sc_ires != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_irid, sc->sc_ires);

	if (sc->sc_rres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    sc->sc_rrid, sc->sc_rres);

	return (0);
}

int
qman_suspend(device_t dev)
{

	return (0);
}

int
qman_resume(device_t dev)
{

	return (0);
}

int
qman_shutdown(device_t dev)
{

	return (0);
}


/**
 * @group QMan API functions implementation.
 * @{
 */

t_Handle
qman_fqr_create(uint32_t fqids_num, e_QmFQChannel channel, uint8_t wq,
    bool force_fqid, uint32_t fqid_or_align, bool init_parked,
    bool hold_active, bool prefer_in_cache, bool congst_avoid_ena,
    t_Handle congst_group, int8_t overhead_accounting_len,
    uint32_t tail_drop_threshold)
{
	struct qman_softc *sc;
	t_QmFqrParams fqr;
	t_Handle fqrh, portal;

	sc = qman_sc;

	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		goto err;
	}

	fqr.h_Qm = sc->sc_qh;
	fqr.h_QmPortal = portal;
	fqr.initParked = init_parked;
	fqr.holdActive = hold_active;
	fqr.preferInCache = prefer_in_cache;

	/* We do not support stashing */
	fqr.useContextAForStash = FALSE;
	fqr.p_ContextA = 0;
	fqr.p_ContextB = 0;

	fqr.channel = channel;
	fqr.wq = wq;
	fqr.shadowMode = FALSE;
	fqr.numOfFqids = fqids_num;

	/* FQID */
	fqr.useForce = force_fqid;
	if (force_fqid) {
		fqr.qs.frcQ.fqid = fqid_or_align;
	} else {
		fqr.qs.nonFrcQs.align = fqid_or_align;
	}

	/* Congestion Avoidance */
	fqr.congestionAvoidanceEnable = congst_avoid_ena;
	if (congst_avoid_ena) {
		fqr.congestionAvoidanceParams.h_QmCg = congst_group;
		fqr.congestionAvoidanceParams.overheadAccountingLength =
		    overhead_accounting_len;
		fqr.congestionAvoidanceParams.fqTailDropThreshold =
		    tail_drop_threshold;
	} else {
		fqr.congestionAvoidanceParams.h_QmCg = 0;
		fqr.congestionAvoidanceParams.overheadAccountingLength = 0;
		fqr.congestionAvoidanceParams.fqTailDropThreshold = 0;
	}

	fqrh = QM_FQR_Create(&fqr);
	if (fqrh == NULL) {
		device_printf(sc->sc_dev,
		    "could not create Frame Queue Range\n");
		goto err;
	}

	sc->sc_fqr_cpu[QM_FQR_GetFqid(fqrh)] = PCPU_GET(cpuid);

	sched_unpin();

	return (fqrh);

err:
	sched_unpin();

	return (NULL);
}

t_Error
qman_fqr_free(t_Handle fqr)
{
	struct qman_softc *sc;
	t_Error error;

	sc = qman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)]);
	thread_unlock(curthread);

	error = QM_FQR_Free(fqr);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
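
/*
 * Illustrative sketch (kept out of the build with #if 0): how a hypothetical
 * consumer might create and later release a frame queue range with the
 * helpers above.  The pool channel, work queue and flag values below are
 * placeholder assumptions for illustration, not taken from this file.
 */
#if 0
static t_Handle
example_fqr_create(void)
{

	/* One dynamically allocated FQID (no forced base), not parked. */
	return (qman_fqr_create(1 /* fqids_num */, e_QM_FQ_CHANNEL_POOL1,
	    1 /* wq */, FALSE /* force_fqid */, 0 /* fqid_or_align */,
	    FALSE /* init_parked */, FALSE /* hold_active */,
	    TRUE /* prefer_in_cache */, FALSE /* congst_avoid_ena */,
	    NULL /* congst_group */, 0 /* overhead_accounting_len */,
	    0 /* tail_drop_threshold */));
}

static void
example_fqr_destroy(t_Handle fqr)
{

	(void)qman_fqr_free(fqr);
}
#endif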

t_Error
qman_fqr_register_cb(t_Handle fqr, t_QmReceivedFrameCallback *callback,
    t_Handle app)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_RegisterCB(fqr, callback, app);

	sched_unpin();

	return (error);
}

t_Error
qman_fqr_enqueue(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_Enqueue(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_counter(t_Handle fqr, uint32_t fqid_off,
    e_QmFqrCounters counter)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetCounter(fqr, portal, fqid_off, counter);

	sched_unpin();

	return (val);
}

t_Error
qman_fqr_pull_frame(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_PullFrame(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_base_fqid(t_Handle fqr)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetFqid(fqr);

	sched_unpin();

	return (val);
}

t_Error
qman_poll(e_QmPortalPollSource source)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_Poll(sc->sc_qh, source);

	sched_unpin();

	return (error);
}

/*
 * TODO: add polling and/or congestion support.
 */

/** @} */
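
/*
 * Illustrative sketch (kept out of the build with #if 0): one way a consumer
 * driver might wire the API above into its receive/transmit path.  The
 * callback body, the poll source constant and the frame descriptor handling
 * are assumptions for illustration, not code taken from this file.
 */
#if 0
static e_RxStoreResponse
example_rx_cb(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{

	/* Consume or hand off the frame descriptor here. */
	return (e_RX_STORE_RESPONSE_CONTINUE);
}

static void
example_io(t_Handle fqr, t_DpaaFD *fd)
{

	/* Deliver received frames of this FQR to example_rx_cb(). */
	qman_fqr_register_cb(fqr, example_rx_cb, NULL);

	/* Enqueue a frame on the first FQID of the range... */
	qman_fqr_enqueue(fqr, 0, fd);

	/* ...and poll the software portal for incoming frames. */
	qman_poll(e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES);
}
#endif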