/*-
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>
#include <machine/pmap.h>
#include <machine/stdarg.h>

#include <dev/dpaa/bman.h>
#include <dev/dpaa/qman.h>
#include <dev/dpaa/portals.h>

#include <powerpc/mpc85xx/mpc85xx.h>
#include "error_ext.h"
#include "std_ext.h"
#include "list_ext.h"
#include "mm_ext.h"

/* Configuration */

/* Define the number of dTSEC ports active in the system */
#define	MALLOCSMART_DTSEC_IN_USE	4

/*
 * Calculate malloc's pool size for dTSEC's buffers.
 * We reserve a 1MB pool for each dTSEC port.
 */
#define	MALLOCSMART_POOL_SIZE		\
	(MALLOCSMART_DTSEC_IN_USE * 1024 * 1024)

#define	MALLOCSMART_SLICE_SIZE		(PAGE_SIZE / 2)	/* 2kB */

/* Defines */
#define	MALLOCSMART_SIZE_TO_SLICE(x)	\
	(((x) + MALLOCSMART_SLICE_SIZE - 1) / MALLOCSMART_SLICE_SIZE)
#define	MALLOCSMART_SLICES		\
	MALLOCSMART_SIZE_TO_SLICE(MALLOCSMART_POOL_SIZE)
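
/*
 * Sizing example (assuming the usual 4kB PAGE_SIZE implied by the 2kB slice
 * comment above): 4 ports * 1MB = 4MB of pool, split into 4MB / 2kB = 2048
 * slices, so XX_MallocSmartMap below holds 2048 entries.
 */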

/* Malloc Pool for NetCommSW */
MALLOC_DEFINE(M_NETCOMMSW, "NetCommSW", "NetCommSW software stack");
MALLOC_DEFINE(M_NETCOMMSW_MT, "NetCommSWTrack",
    "NetCommSW software allocation tracker");

/* MallocSmart data structures */
static void *XX_MallocSmartPool;
static int XX_MallocSmartMap[MALLOCSMART_SLICES];

static struct mtx XX_MallocSmartLock;
static struct mtx XX_MallocTrackLock;
MTX_SYSINIT(XX_MallocSmartLockInit, &XX_MallocSmartLock,
    "NetCommSW MallocSmart Lock", MTX_DEF);
MTX_SYSINIT(XX_MallocTrackLockInit, &XX_MallocTrackLock,
    "NetCommSW MallocTrack Lock", MTX_DEF);

/* Interrupt info */
#define	XX_INTR_FLAG_PREALLOCATED	(1 << 0)
#define	XX_INTR_FLAG_BOUND		(1 << 1)
#define	XX_INTR_FLAG_FMAN_FIX		(1 << 2)

struct XX_IntrInfo {
	driver_intr_t	*handler;
	void		*arg;
	int		cpu;
	int		flags;
	void		*cookie;
};

static struct XX_IntrInfo XX_IntrInfo[INTR_VECTORS];

/* Portal type identifiers */
enum XX_PortalIdent {
	BM_PORTAL = 0,
	QM_PORTAL,
};

/* Structure to store portals' properties */
struct XX_PortalInfo {
	vm_paddr_t	portal_ce_pa[2][MAXCPU];
	vm_paddr_t	portal_ci_pa[2][MAXCPU];
	uint32_t	portal_ce_size[2][MAXCPU];
	uint32_t	portal_ci_size[2][MAXCPU];
	vm_offset_t	portal_ce_va[2];
	vm_offset_t	portal_ci_va[2];
	uintptr_t	portal_intr[2][MAXCPU];
};

static struct XX_PortalInfo XX_PInfo;

void
XX_Exit(int status)
{

	panic("NetCommSW: Exit called with status %i", status);
}

void
XX_Print(char *str, ...)
{
	va_list ap;

	va_start(ap, str);
	vprintf(str, ap);
	va_end(ap);
}

void *
XX_Malloc(uint32_t size)
{
	void *p = (malloc(size, M_NETCOMMSW, M_NOWAIT));

	return (p);
}

static int
XX_MallocSmartMapCheck(unsigned int start, unsigned int slices)
{
	unsigned int i;

	mtx_assert(&XX_MallocSmartLock, MA_OWNED);
	for (i = start; i < start + slices; i++)
		if (XX_MallocSmartMap[i])
			return (FALSE);
	return (TRUE);
}
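
/*
 * Each allocation is recorded in XX_MallocSmartMap by storing the slice
 * count in the entry for its first slice and -1 in the entries for the
 * remaining slices; a zero entry marks a free slice.  XX_FreeSmart later
 * recovers the allocation length from the first entry alone.
 */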

static void
XX_MallocSmartMapSet(unsigned int start, unsigned int slices)
{
	unsigned int i;

	mtx_assert(&XX_MallocSmartLock, MA_OWNED);

	for (i = start; i < start + slices; i++)
		XX_MallocSmartMap[i] = ((i == start) ? slices : -1);
}

static void
XX_MallocSmartMapClear(unsigned int start, unsigned int slices)
{
	unsigned int i;

	mtx_assert(&XX_MallocSmartLock, MA_OWNED);

	for (i = start; i < start + slices; i++)
		XX_MallocSmartMap[i] = 0;
}

int
XX_MallocSmartInit(void)
{
	int error;

	error = E_OK;
	mtx_lock(&XX_MallocSmartLock);

	if (XX_MallocSmartPool)
		goto out;

	/* Allocate MallocSmart pool */
	XX_MallocSmartPool = contigmalloc(MALLOCSMART_POOL_SIZE, M_NETCOMMSW,
	    M_NOWAIT, 0, 0xFFFFFFFFFull, MALLOCSMART_POOL_SIZE, 0);
	if (!XX_MallocSmartPool) {
		error = E_NO_MEMORY;
		goto out;
	}

out:
	mtx_unlock(&XX_MallocSmartLock);
	return (error);
}

void *
XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
{
	unsigned int i;
	vm_offset_t addr;

	addr = 0;

	/* Convert alignment and size to number of slices */
	alignment = MALLOCSMART_SIZE_TO_SLICE(alignment);
	size = MALLOCSMART_SIZE_TO_SLICE(size);

	/* Lock resources */
	mtx_lock(&XX_MallocSmartLock);

	/* Allocate region */
	for (i = 0; i + size <= MALLOCSMART_SLICES; i += alignment) {
		if (XX_MallocSmartMapCheck(i, size)) {
			XX_MallocSmartMapSet(i, size);
			addr = (vm_offset_t)XX_MallocSmartPool +
			    (i * MALLOCSMART_SLICE_SIZE);
			break;
		}
	}

	/* Unlock resources */
	mtx_unlock(&XX_MallocSmartLock);

	return ((void *)addr);
}

void
XX_FreeSmart(void *p)
{
	unsigned int start, slices;

	/* Calculate first slice of region */
	start = MALLOCSMART_SIZE_TO_SLICE((vm_offset_t)(p) -
	    (vm_offset_t)XX_MallocSmartPool);

	/* Lock resources */
	mtx_lock(&XX_MallocSmartLock);

	KASSERT(XX_MallocSmartMap[start] > 0,
	    ("XX_FreeSmart: Double or mid-block free!\n"));

	/* Free region */
	slices = XX_MallocSmartMap[start];
	XX_MallocSmartMapClear(start, slices);

	/* Unlock resources */
	mtx_unlock(&XX_MallocSmartLock);
}

void
XX_Free(void *p)
{

	free(p, M_NETCOMMSW);
}

uint32_t
XX_DisableAllIntr(void)
{

	return (intr_disable());
}

void
XX_RestoreAllIntr(uint32_t flags)
{

	intr_restore(flags);
}

static bool
XX_IsPortalIntr(uintptr_t irq)
{
	int cpu, type;

	/* Check interrupt numbers of all available portals */
	for (type = 0; type < 2; type++)
		for (cpu = 0; cpu < MAXCPU; cpu++)
			if (irq == XX_PInfo.portal_intr[type][cpu])
				return (1);

	return (0);
}

void
XX_FmanFixIntr(int irq)
{

	XX_IntrInfo[irq].flags |= XX_INTR_FLAG_FMAN_FIX;
}

static bool
XX_FmanNeedsIntrFix(int irq)
{

	if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_FMAN_FIX)
		return (1);

	return (0);
}
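
/*
 * Interrupt plumbing notes: the 'irq' values handed to the XX_*Intr()
 * routines below are struct resource pointers cast to uintptr_t; the real
 * interrupt vector is recovered with rman_get_start().  XX_Dispatch() is the
 * bus-level handler installed for preallocated interrupts: once SMP is up it
 * binds the interrupt thread to the CPU requested in XX_PreallocAndBindIntr()
 * and then calls the handler registered via XX_SetIntr().
 */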

static void
XX_Dispatch(void *arg)
{
	struct XX_IntrInfo *info;

	info = arg;

	/* Bind this thread to the proper CPU once SMP has started. */
	if ((info->flags & XX_INTR_FLAG_BOUND) == 0 && smp_started &&
	    info->cpu >= 0) {
		thread_lock(curthread);
		sched_bind(curthread, info->cpu);
		thread_unlock(curthread);

		info->flags |= XX_INTR_FLAG_BOUND;
	}

	if (info->handler == NULL) {
		printf("%s(): IRQ handler is NULL!\n", __func__);
		return;
	}

	info->handler(info->arg);
}

t_Error
XX_PreallocAndBindIntr(uintptr_t irq, unsigned int cpu)
{
	struct resource *r;
	unsigned int inum;
	t_Error error;

	r = (struct resource *)irq;
	inum = rman_get_start(r);

	error = XX_SetIntr(irq, XX_Dispatch, &XX_IntrInfo[inum]);
	if (error != 0)
		return (error);

	XX_IntrInfo[inum].flags = XX_INTR_FLAG_PREALLOCATED;
	XX_IntrInfo[inum].cpu = cpu;

	return (E_OK);
}

t_Error
XX_DeallocIntr(uintptr_t irq)
{
	struct resource *r;
	unsigned int inum;

	r = (struct resource *)irq;
	inum = rman_get_start(r);

	if ((XX_IntrInfo[inum].flags & XX_INTR_FLAG_PREALLOCATED) == 0)
		return (E_INVALID_STATE);

	XX_IntrInfo[inum].flags = 0;
	return (XX_FreeIntr(irq));
}

t_Error
XX_SetIntr(uintptr_t irq, t_Isr *f_Isr, t_Handle handle)
{
	device_t dev;
	struct resource *r;
	unsigned int flags;
	int err;

	r = (struct resource *)irq;
	dev = rman_get_device(r);
	irq = rman_get_start(r);

	/* Handle preallocated interrupts */
	if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) {
		if (XX_IntrInfo[irq].handler != NULL)
			return (E_BUSY);

		XX_IntrInfo[irq].handler = f_Isr;
		XX_IntrInfo[irq].arg = handle;

		return (E_OK);
	}

	flags = INTR_TYPE_NET | INTR_MPSAFE;

	/* BMAN/QMAN portal interrupts must be exclusive */
	if (XX_IsPortalIntr(irq))
		flags |= INTR_EXCL;

	err = bus_setup_intr(dev, r, flags, NULL, f_Isr, handle,
	    &XX_IntrInfo[irq].cookie);
	if (err)
		goto finish;

	/*
	 * XXX: Bind the FMan IRQ to CPU0. The current interrupt subsystem
	 * directs each interrupt to all CPUs. A race between an interrupt
	 * assertion and masking may occur, and the interrupt handler may be
	 * called multiple times per interrupt. FMan does not support such a
	 * situation, so the workaround is to bind the FMan interrupt to CPU0
	 * only.
	 */
#ifdef SMP
	if (XX_FmanNeedsIntrFix(irq))
		err = powerpc_bind_intr(irq, 0);
#endif
finish:
	return (err);
}

t_Error
XX_FreeIntr(uintptr_t irq)
{
	device_t dev;
	struct resource *r;

	r = (struct resource *)irq;
	dev = rman_get_device(r);
	irq = rman_get_start(r);

	/* Handle preallocated interrupts */
	if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) {
		if (XX_IntrInfo[irq].handler == NULL)
			return (E_INVALID_STATE);

		XX_IntrInfo[irq].handler = NULL;
		XX_IntrInfo[irq].arg = NULL;

		return (E_OK);
	}

	return (bus_teardown_intr(dev, r, XX_IntrInfo[irq].cookie));
}

t_Error
XX_EnableIntr(uintptr_t irq)
{
	struct resource *r;

	r = (struct resource *)irq;
	irq = rman_get_start(r);

	powerpc_intr_unmask(irq);

	return (E_OK);
}

t_Error
XX_DisableIntr(uintptr_t irq)
{
	struct resource *r;

	r = (struct resource *)irq;
	irq = rman_get_start(r);

	powerpc_intr_mask(irq);

	return (E_OK);
}

t_TaskletHandle
XX_InitTasklet(void (*routine)(void *), void *data)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (NULL);
}

void
XX_FreeTasklet(t_TaskletHandle h_Tasklet)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
}

int
XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (0);
}

void
XX_FlushScheduledTasks(void)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
}

int
XX_TaskletIsQueued(t_TaskletHandle h_Tasklet)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (0);
}

void
XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
}

t_Handle
XX_GetTaskletData(t_TaskletHandle h_Tasklet)
{
	/* Not referenced */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (NULL);
}

t_Handle
XX_InitSpinlock(void)
{
	struct mtx *m;

	m = malloc(sizeof(*m), M_NETCOMMSW, M_NOWAIT | M_ZERO);
	if (!m)
		return (0);

	mtx_init(m, "NetCommSW Lock", NULL, MTX_DEF | MTX_DUPOK);

	return (m);
}

void
XX_FreeSpinlock(t_Handle h_Spinlock)
{
	struct mtx *m;

	m = h_Spinlock;

	mtx_destroy(m);
	free(m, M_NETCOMMSW);
}

void
XX_LockSpinlock(t_Handle h_Spinlock)
{
	struct mtx *m;

	m = h_Spinlock;
	mtx_lock(m);
}

void
XX_UnlockSpinlock(t_Handle h_Spinlock)
{
	struct mtx *m;

	m = h_Spinlock;
	mtx_unlock(m);
}

uint32_t
XX_LockIntrSpinlock(t_Handle h_Spinlock)
{

	XX_LockSpinlock(h_Spinlock);
	return (0);
}

void
XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags)
{

	XX_UnlockSpinlock(h_Spinlock);
}

uint32_t
XX_Sleep(uint32_t msecs)
{

	XX_UDelay(1000 * msecs);
	return (0);
}

void
XX_UDelay(uint32_t usecs)
{
	DELAY(usecs);
}

t_Error
XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
    t_IpcMsgHandler *f_MsgHandler, t_Handle h_Module, uint32_t replyLength)
{

	/*
	 * This function does nothing and returns a fake E_OK status, as
	 * NetCommSW IPC is not used by the FreeBSD drivers.
	 */
	return (E_OK);
}

t_Error
XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH])
{

	/*
	 * This function does nothing and returns a fake E_OK status, as
	 * NetCommSW IPC is not used by the FreeBSD drivers.
	 */
	return (E_OK);
}

t_Error
XX_IpcSendMessage(t_Handle h_Session,
    uint8_t *p_Msg, uint32_t msgLength, uint8_t *p_Reply,
    uint32_t *p_ReplyLength, t_IpcMsgCompletion *f_Completion, t_Handle h_Arg)
{

	/* Should not be called */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (E_OK);
}

t_Handle
XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
    char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH])
{

	/* Should not be called */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (NULL);
}

t_Error
XX_IpcFreeSession(t_Handle h_Session)
{

	/* Should not be called */
	printf("NetCommSW: Unimplemented function %s() called!\n", __func__);
	return (E_OK);
}

physAddress_t
XX_VirtToPhys(void *addr)
{
	vm_paddr_t paddr;
	int cpu;

	cpu = PCPU_GET(cpuid);

	/* Handle NULL address */
	if (addr == NULL)
		return (-1);

	/* Check CCSR */
	if ((vm_offset_t)addr >= ccsrbar_va &&
	    (vm_offset_t)addr < ccsrbar_va + ccsrbar_size)
		return (((vm_offset_t)addr - ccsrbar_va) + ccsrbar_pa);

	/* Handle BMAN mappings */
	if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[BM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ce_va[BM_PORTAL] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return (XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ce_va[BM_PORTAL]);

	if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[BM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ci_va[BM_PORTAL] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return (XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ci_va[BM_PORTAL]);

	/* Handle QMAN mappings */
	if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[QM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ce_va[QM_PORTAL] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return (XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ce_va[QM_PORTAL]);

	if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[QM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ci_va[QM_PORTAL] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return (XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ci_va[QM_PORTAL]);

	if (PMAP_HAS_DMAP && (vm_offset_t)addr >= DMAP_BASE_ADDRESS &&
	    (vm_offset_t)addr <= DMAP_MAX_ADDRESS)
		return (DMAP_TO_PHYS((vm_offset_t)addr));
	else
		paddr = pmap_kextract((vm_offset_t)addr);

	if (paddr == 0)
		printf("NetCommSW: "
		    "Unable to translate virtual address %p!\n", addr);
	else
		pmap_track_page(kernel_pmap, (vm_offset_t)addr);

	return (paddr);
}
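
/*
 * Reverse translation used by the NetCommSW glue: try the CCSR window first,
 * then the per-CPU BMAN/QMAN portal windows, then fall back to the pv list of
 * the vm_page backing the address and finally to the direct map, if present.
 */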

void *
XX_PhysToVirt(physAddress_t addr)
{
	struct pv_entry *pv;
	vm_page_t page;
	int cpu;

	/* Check CCSR */
	if (addr >= ccsrbar_pa && addr < ccsrbar_pa + ccsrbar_size)
		return ((void *)((vm_offset_t)(addr - ccsrbar_pa) +
		    ccsrbar_va));

	cpu = PCPU_GET(cpuid);

	/* Handle BMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[BM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu])));

	/* Handle QMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu])));

	page = PHYS_TO_VM_PAGE(addr);
	pv = TAILQ_FIRST(&page->md.pv_list);

	if (pv != NULL)
		return ((void *)(pv->pv_va + ((vm_offset_t)addr & PAGE_MASK)));

	if (PMAP_HAS_DMAP)
		return ((void *)(uintptr_t)PHYS_TO_DMAP(addr));

	printf("NetCommSW: "
	    "Unable to translate physical address 0x%09jx!\n", (uintmax_t)addr);

	return (NULL);
}

void
XX_PortalSetInfo(device_t dev)
{
	char *dev_name;
	struct dpaa_portals_softc *sc;
	int i, type, len;

	len = strlen("bman-portals");

	/* Allocate a buffer large enough to hold the compared prefix. */
	dev_name = malloc(len + 1, M_TEMP, M_WAITOK | M_ZERO);

	strncpy(dev_name, device_get_name(dev), len);

	if (strncmp(dev_name, "bman-portals", len) && strncmp(dev_name,
	    "qman-portals", len))
		goto end;

	if (strncmp(dev_name, "bman-portals", len) == 0)
		type = BM_PORTAL;
	else
		type = QM_PORTAL;

	sc = device_get_softc(dev);

	for (i = 0; sc->sc_dp[i].dp_ce_pa != 0; i++) {
		XX_PInfo.portal_ce_pa[type][i] = sc->sc_dp[i].dp_ce_pa;
		XX_PInfo.portal_ci_pa[type][i] = sc->sc_dp[i].dp_ci_pa;
		XX_PInfo.portal_ce_size[type][i] = sc->sc_dp[i].dp_ce_size;
		XX_PInfo.portal_ci_size[type][i] = sc->sc_dp[i].dp_ci_size;
		XX_PInfo.portal_intr[type][i] = sc->sc_dp[i].dp_intr_num;
	}

	XX_PInfo.portal_ce_va[type] = rman_get_bushandle(sc->sc_rres[0]);
	XX_PInfo.portal_ci_va[type] = rman_get_bushandle(sc->sc_rres[1]);
end:
	free(dev_name, M_TEMP);
}