/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * zuluvm module
 *
 * Provides services required by the XVR-4000 graphics accelerator (zulu)
 * that are not provided by the ddi. See PSARC 2002/231.
 *
 * Zulu has 2 dma engines with built-in MMUs. zuluvm provides TLB miss
 * interrupt support, obtaining virtual to physical address translations
 * using the XHAT interface, PSARC/2003/517.
 *
 * The module has 3 components: this file, sun4u/vm/zulu_hat.c, and the
 * assembly language routines in sun4u/ml/zulu_asm.s and
 * sun4u/ml/zulu_hat_asm.s.
 *
 * The interrupt handler is a data bearing mondo interrupt handled at TL=1.
 * If no translation is found in the zulu hat's TSB, or if the TSB is locked
 * by C code, the handler posts a soft interrupt which wakes up a parked
 * thread belonging to zuludaemon(1M).
 */

#include <sys/conf.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/intr.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/machsystm.h>
#include <sys/ivintr.h>
#include <sys/tnf_probe.h>
#include <sys/intreg.h>
#include <sys/atomic.h>
#include <vm/as.h>
#include <vm/seg_enum.h>
#include <vm/faultcode.h>
#include <sys/dmv.h>
#include <sys/zulumod.h>
#include <sys/zulu_hat.h>

#define ZULUVM_GET_PAGE(val) \
    (caddr_t)((uintptr_t)(val) & PAGEMASK)
#define ZULUVM_GET_AS curthread->t_procp->p_as

#define ZULUVM_LOCK   mutex_enter(&(zdev->dev_lck))
#define ZULUVM_UNLOCK mutex_exit(&(zdev->dev_lck))

#define ZULUVM_SET_STATE(_z, b, c) \
    atomic_cas_32((uint32_t *)&((_z)->zvm.state), c, b)
#define ZULUVM_GET_STATE(_z) \
    (_z)->zvm.state
#define ZULUVM_SET_IDLE(_z) \
    (_z)->zvm.state = ZULUVM_STATE_IDLE;

#define ZULUVM_INO_MASK ((1<<INO_SIZE)-1)
#define ZULUVM_IGN_MASK ((1<<IGN_SIZE)-1)
#define ZULUVM_MONDO(_zdev, _n) \
    ((ZULUVM_IGN_MASK & _zdev->agentid) << INO_SIZE) | \
    (ZULUVM_INO_MASK & (_n))

static void zuluvm_stop(zuluvm_state_t *, int, char *);
static zuluvm_proc_t *zuluvm_find_proc(zuluvm_state_t *, struct as *);
static int zuluvm_proc_release(zuluvm_state_t *zdev, zuluvm_proc_t *proc);
static int zuluvm_get_intr_props(zuluvm_state_t *zdev, dev_info_t *devi);
static int zuluvm_driver_attach(zuluvm_state_t *);
static int zuluvm_driver_detach(zuluvm_state_t *);
static void zuluvm_retarget_intr(void *arg);
static void zuluvm_do_retarget(zuluvm_state_t *zdev);

extern const unsigned int _mmu_pageshift;

extern int zuluvm_base_pgsize;
static int zuluvm_pagesizes[ZULUM_MAX_PG_SIZES + 1];

int zuluvm_fast_tlb = 1;

zuluvm_state_t *zuluvm_devtab[ZULUVM_MAX_DEV];
kmutex_t zuluvm_lck;

#ifdef DEBUG
int zuluvm_debug_state = 0;
#endif

unsigned long zuluvm_ctx_locked = 0;

/*
 * Module linkage information for the kernel.
 */
extern struct mod_ops mod_miscops;

static struct modlmisc modlmisc = {
    &mod_miscops,
    "sun4u support " ZULUVM_MOD_VERSION
};

static struct modlinkage modlinkage = {
    MODREV_1,
    (void *)&modlmisc,
    NULL
};

int
_init(void)
{
    zuluvm_base_pgsize = (_mmu_pageshift - 13) / 3;
    if (zulu_hat_init() != 0) {
        return (ZULUVM_ERROR);
    }
    mutex_init(&zuluvm_lck, NULL, MUTEX_DEFAULT, NULL);
    return (mod_install(&modlinkage));
}

int
_fini(void)
{
    mutex_destroy(&zuluvm_lck);
    (void) zulu_hat_destroy();
    return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*
 * currently the kernel driver makes the following assumptions:
 * - there is only one TLB miss per zulu device handled at
 *   any given time
 *   ==> we only need local data storage per device, not per DMA
 *   ==> a page fault will block the DMA engine until the fault
 *       is resolved
 *   ==> a page fault will not trigger a zulu DMA context switch
 *
 * If we want to implement asynchronous zulu page faults, then we
 * need to keep track of outstanding faults while zulu DMA runs
 * in a different context.
 */
static int
zuluvm_write_tte(zuluvm_state_t *zdev, void *arg, caddr_t addr,
    int t_pfn, int t_perm, int t_size, uint64_t tag,
    int tlbtype, int *size)
{
    int error;

    (void) addr;

    ZULUVM_STATS_MISS(zdev, t_size);

    if (tag == 0) { /* not coming from preload */
        int state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_WRITE_TTE,
            ZULUVM_STATE_INTR_PENDING);
        if (state != ZULUVM_STATE_INTR_PENDING) {
            zuluvm_stop(zdev, state, "zuluvm_write_tte");
            return (ZULUVM_MISS_CANCELED);
        }
    }

    if (!(tlbtype & ZULUVM_ITLB_FLAG) &&
        t_size != zuluvm_base_pgsize &&
        t_size != ZULU_TTE4M) {
        t_size = zuluvm_base_pgsize;
        TNF_PROBE_2(zuluvm_write_tte_new_pfn, "zuluvm", /* */,
            tnf_opaque, t_pfn, t_pfn, tnf_int, pagesize, t_size);
    }
    TNF_PROBE_1(zuluvm_write_tte, "zuluvm", /* */,
        tnf_opaque, t_pfn, t_pfn);
    /*
     * if the caller is zuluvm_preload, then we need to pass
     * back the page size so it can add the right offset.
     */
    if (size)
        *size = t_size;

    error = zulud_write_tte(zdev, arg, t_size, tag, t_pfn,
        t_perm, tlbtype);

    return (error);
}

static void
zuluvm_stop(zuluvm_state_t *zdev, int state, char *tag)
{
    int ostate = state;
    while (state != ZULUVM_STATE_STOPPED) {
        state = ZULUVM_SET_STATE(zdev,
            ZULUVM_STATE_STOPPED, state);
#ifdef DEBUG
        if (zuluvm_debug_state)
            cmn_err(CE_NOTE, "zuluvm_stop(%s): (loop) state %d\n",
                tag, state);
#endif
    }
    TNF_PROBE_2(zuluvm_stop, "zuluvm", /* */,
        tnf_string, tag, tag,
        tnf_int, state, ostate);
    ZULUVM_STATS_CANCEL(zdev);
}

/*
 * Executed in the context of the parked zulu daemon thread;
 * uses zulu_hat_load to resolve the miss.
 * The tte is loaded and miss done is signalled by the function
 * zuluvm_load_tte, which is called from zulu_hat.
 *
 * This function is synchronized with zuluvm_as_free.
 * zuluvm_as_free will block until miss servicing is complete.
 *
 * There is a race condition between as_free and the zulu tlb miss
 * soft interrupt:
 * - queue zulu interrupt
 * - process dies, as_free runs
 * - interrupt gets scheduled and runs as_fault on the
 *   already freed as.
 * This is solved by keeping track of current zulu dma processes
 * and invalidating them in zuluvm_as_free.
 */
uint_t
zuluvm_tlb_handler(caddr_t data)
{
    zuluvm_state_t *zdev = (zuluvm_state_t *)data;
    int error;
    int flag = 0;
    int wait = 0;
    zuluvm_proc_t *proc = NULL;
    struct zulu_hat *zhat = NULL;
    caddr_t addr;
    int tlbtype;
    void *arg;
    int state, newstate;

    TNF_PROBE_1(zuluvm_tlb_handler_lwp, "zuluvm", /* */,
        tnf_opaque, lwp, ttolwp(curthread));

    ZULUVM_LOCK;
    error = ZULUVM_GET_TLB_ERRCODE(zdev);
    addr = (caddr_t)ZULUVM_GET_TLB_ADDR(zdev);
    tlbtype = ZULUVM_GET_TLB_TYPE(zdev);
    arg = zdev->zvm.arg;

    /*
     * select the correct dma engine and remember the
     * as_free synchronization flags.
     */
    switch (tlbtype) {
    case ZULUVM_ITLB1:
    case ZULUVM_DMA1:
        proc = zdev->zvm.proc1;
        flag |= ZULUVM_DO_INTR1;
        wait |= ZULUVM_WAIT_INTR1;
        break;
    case ZULUVM_ITLB2:
    case ZULUVM_DMA2:
        proc = zdev->zvm.proc2;
        flag |= ZULUVM_DO_INTR2;
        wait |= ZULUVM_WAIT_INTR2;
        break;
    }

    state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_INTR_PENDING,
        ZULUVM_STATE_INTR_QUEUED);
    newstate = ZULUVM_GET_STATE(zdev);

    TNF_PROBE_2(zuluvm_tlb_handler_state, "zuluvm", /* */,
        tnf_int, oldstate, state,
        tnf_int, newstate, newstate);
#ifdef DEBUG
    if (zuluvm_debug_state)
        cmn_err(CE_NOTE, "zuluvm_tlb_handler: state %d\n", state);
#endif
    if (state != ZULUVM_STATE_INTR_PENDING &&
        state != ZULUVM_STATE_INTR_QUEUED) {
        ZULUVM_UNLOCK;

        zuluvm_stop(zdev, state, "softintr1");
        zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_MISS_CANCELED);
        return (1);
    }

    /*
     * block the as_free callback in case it comes in
     */
    zdev->intr_flags |= flag;
    ZULUVM_UNLOCK;

    mutex_enter(&zdev->proc_lck);
    /*
     * check if this as is still valid
     */
    if (proc == NULL || proc->valid == 0 || proc->zhat == NULL) {
        mutex_exit(&zdev->proc_lck);
        /*
         * we are on our way out, wake up the as_free
         * callback if it is waiting for us
         */
        ZULUVM_LOCK;
        zdev->intr_flags &= ~flag;
        if (zdev->intr_flags | wait)
            cv_broadcast(&zdev->intr_wait);
        ZULUVM_UNLOCK;
        state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
            ZULUVM_STATE_INTR_PENDING);
        if (state != ZULUVM_STATE_INTR_PENDING) {
            zuluvm_stop(zdev, state, "softintr3");
        }
        zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_NO_HAT);
        return (1);
    }
    zhat = proc->zhat;
    mutex_exit(&zdev->proc_lck);

    TNF_PROBE_1(zuluvm_tlb_handler, "zuluvm", /* */,
        tnf_opaque, addr, addr);

    switch (error) {
    case ZULUVM_CTX_LOCKED:
        /*
         * the trap handler found that zulu_hat had the lock bit set;
         * rather than block in the fast trap handler, it punts
         * in this rare instance
         */
        ++zuluvm_ctx_locked;
        TNF_PROBE_1(zuluvm_ctx_locked, "zuluvm", /* CSTYLED */,
            tnf_ulong, zuluvm_ctx_locked, zuluvm_ctx_locked);

        /*FALLTHROUGH*/

    case ZULUVM_TTE_DELAY:
        /*
         * fast tlb handler was skipped, see zuluvm_fast_tlb flag
         */
        /*FALLTHROUGH*/

    case ZULUVM_NO_TTE:
        /*
         * no TSB entry and TTE in the hash
         */
        mutex_enter(&zdev->load_lck);
        zdev->in_intr = 1;
        error = zulu_hat_load(zhat, addr,
            (tlbtype == ZULUVM_DMA2) ? S_WRITE : S_READ, NULL);
        zdev->in_intr = 0;
        mutex_exit(&zdev->load_lck);
        if (error) {
            error = ZULUVM_NO_MAP;
        } else {
            error = ZULUVM_SUCCESS;
            TNF_PROBE_1(zuluvm_tlb_handler_done, "zuluvm", /* */,
                tnf_int, error, error);
            return (1);
        }

    default:
        /*
         * error case, fall through and tell zulu driver to abort DMA
         */
        break;
    }

    if (error != ZULUVM_MISS_CANCELED) {
        state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
            ZULUVM_STATE_WRITE_TTE);
        newstate = ZULUVM_GET_STATE(zdev);
        TNF_PROBE_2(zuluvm_tlb_handler_state_done, "zuluvm", /* */,
            tnf_int, oldstate, state,
            tnf_int, newstate, newstate);
        if (state != ZULUVM_STATE_WRITE_TTE) {
            zuluvm_stop(zdev, state, "softintr4");
        }
    }
    /*
     * synchronize with the as_free callback.
     * It will set the wait flag; in that case we send
     * a wake up.
     */
    ZULUVM_LOCK;
    zdev->intr_flags &= ~flag;
    if (zdev->intr_flags | wait)
        cv_broadcast(&zdev->intr_wait);
    ZULUVM_UNLOCK;

    TNF_PROBE_1(zuluvm_tlb_handler_done, "zuluvm", /* */,
        tnf_int, error, error);

    zulud_tlb_done(zdev, arg, tlbtype, error);

    return (1);
}


void
zuluvm_load_tte(struct zulu_hat *zhat, caddr_t addr, uint64_t pfn,
    int perm, int size)
{
    zuluvm_state_t *zdev = zhat->zdev;
    int tlbtype = ZULUVM_GET_TLB_TYPE(zdev);

    ASSERT(MUTEX_HELD(&zdev->load_lck));
    ASSERT(pfn != 0);

    if (zdev->in_intr) {
        int error;
        int flag = 0;
        int wait = 0;

        error = zuluvm_write_tte(zdev, zdev->zvm.arg, addr, pfn,
            perm, size, 0, tlbtype, NULL);

        if (error != ZULUVM_MISS_CANCELED) {
            int state, newstate;

            state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
                ZULUVM_STATE_WRITE_TTE);
            newstate = ZULUVM_GET_STATE(zdev);
            TNF_PROBE_2(zuluvm_tlb_handler_state_done, "zuluvm",
                /* */, tnf_int, oldstate, state,
                tnf_int, newstate, newstate);
            if (state != ZULUVM_STATE_WRITE_TTE) {
                zuluvm_stop(zdev, state, "softintr4");
            }
        }
        /*
         * synchronize with the as_free callback.
         * It will set the wait flag; in that case we send
         * a wake up.
         */
        switch (tlbtype) {
        case ZULUVM_ITLB1:
        case ZULUVM_DMA1:
            flag = ZULUVM_DO_INTR1;
            wait = ZULUVM_WAIT_INTR1;
            break;
        case ZULUVM_ITLB2:
        case ZULUVM_DMA2:
            flag = ZULUVM_DO_INTR2;
            wait = ZULUVM_WAIT_INTR2;
            break;
        }

        ZULUVM_LOCK;
        zdev->intr_flags &= ~flag;
        if (zdev->intr_flags | wait)
            cv_broadcast(&zdev->intr_wait);
        ZULUVM_UNLOCK;

        zulud_tlb_done(zdev, zdev->zvm.arg, tlbtype, error);
    } else {
        (void) zuluvm_write_tte(zdev, zdev->zvm.arg, addr, pfn,
            perm, size, (uint64_t)addr |
            zhat->zulu_ctx, tlbtype, NULL);
    }
}


/*
 * This function provides the faulting thread for zulu page faults.
 * It is called from the device driver in response to an ioctl issued
 * by a zuludaemon thread.
 * It sits in cv_wait_sig until it gets woken up by a signal or a
 * zulu tlb miss soft interrupt.
 */
int
zuluvm_park(zuluvm_info_t devp)
{
    int rval;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    mutex_enter(&zdev->park_lck);
    zdev->parking = 1;
    for (;;) {
        rval = cv_wait_sig(&zdev->park_cv, &zdev->park_lck);
        if (rval == 0)
            break;
        rval = zuluvm_tlb_handler(devp);
    }
    zdev->parking = 0;
    mutex_exit(&zdev->park_lck);
    return (rval);
}

/*
 * zulu soft interrupt handler, just triggers the parked zulu fault
 * thread
 */
/*ARGSUSED*/
uint_t
zuluvm_softintr(caddr_t devp, caddr_t arg2)
{
    int tlbtype;
    void *arg;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    mutex_enter(&zdev->park_lck);
    if (zdev->parking) {
        cv_signal(&zdev->park_cv);
        mutex_exit(&zdev->park_lck);
        TNF_PROBE_1(zuluvm_fast_intr, "zuluvm", /* */,
            tnf_opaque, devp, devp);
    } else {
        mutex_exit(&zdev->park_lck);
        cmn_err(CE_NOTE, "zuluvm: no page fault thread\n");
        ZULUVM_LOCK;
        tlbtype = ZULUVM_GET_TLB_TYPE(zdev);
        arg = zdev->zvm.arg;
        ZULUVM_UNLOCK;
        TNF_PROBE_0(zuluvm_fast_intr, "zuluvm", /* */);
        zuluvm_stop(zdev, ZULUVM_STATE_INTR_QUEUED, "fast_intr");
        zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_NO_TTE);
    }
    return (1);
}

/* ***** public interface for process mapping events (hat layer) ***** */

/*
 * If the page size matches the Zulu page sizes then just pass
 * it thru. If not then emulate the page demap with demaps of
 * smaller page size.
 */
/* ARGSUSED */
void
zuluvm_demap_page(void *arg, struct hat *hat_ptr, short ctx,
    caddr_t vaddr, uint_t size)
{
    void *ddarg;
    zuluvm_state_t *zdev = (zuluvm_state_t *)arg;

    if (arg == NULL)
        return;

    ZULUVM_STATS_DEMAP_PAGE(zdev);

    ddarg = zdev->zvm.arg;

    TNF_PROBE_3(zuluvm_demap_page, "zuluvm", /* */,
        tnf_opaque, addr, vaddr,
        tnf_int, size, size,
        tnf_int, ctx, ctx);

    if (ddarg != NULL) {
        if (size != zuluvm_base_pgsize &&
            size != ZULU_TTE4M) {
            int i;
            int cnt = size - zuluvm_base_pgsize;
            cnt = ZULU_HAT_SZ_SHIFT(cnt);
            for (i = 0; i < cnt; i++) {
                uintptr_t addr = (uintptr_t)vaddr |
                    i << ZULU_HAT_BP_SHIFT;
                zulud_demap_page(zdev, ddarg,
                    (caddr_t)addr, ctx);
            }
        } else {
            zulud_demap_page(zdev, ddarg, vaddr, ctx);
        }
        TNF_PROBE_0(zuluvm_demap_page_done, "zuluvm", /* */);
    } else {
        TNF_PROBE_0(zuluvm_demap_page_null_ddarg, "zuluvm", /* */);
    }
}

/*
 * An entire context has gone away, just pass it thru
 */
void
zuluvm_demap_ctx(void *arg, short ctx)
{
    void *ddarg;
    zuluvm_state_t *zdev = (zuluvm_state_t *)arg;

    if (arg == NULL)
        return;

    ZULUVM_STATS_DEMAP_CTX(zdev);

    TNF_PROBE_1(zuluvm_demap_ctx, "zuluvm", /* */,
        tnf_int, ctx, ctx);
    ddarg = zdev->zvm.arg;

    if (ddarg != NULL)
        zulud_demap_ctx(zdev, ddarg, ctx);
}

static int
zuluvm_driver_attach(zuluvm_state_t *zdev)
{
    int i;
    mutex_enter(&zuluvm_lck);
    for (i = 0; i < ZULUVM_MAX_DEV; i++) {
        if (zuluvm_devtab[i] == NULL) {
            zuluvm_devtab[i] = zdev;
            ZULUVM_SET_IDLE(zdev);
            break;
        }
    }
    mutex_exit(&zuluvm_lck);
    if (i >= ZULUVM_MAX_DEV)
        return (ZULUVM_ERROR);

    if (zulu_hat_attach((void *)zdev) != 0) {
        return (ZULUVM_ERROR);
    }

    mutex_init(&zdev->dev_lck, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&zdev->load_lck, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&zdev->proc_lck, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&zdev->park_lck, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&zdev->park_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&zdev->intr_wait, NULL, CV_DEFAULT, NULL);
    zdev->parking = 0;

#ifdef ZULUVM_STATS
    zdev->zvm.cancel = 0;
    zdev->zvm.pagefault = 0;
    zdev->zvm.no_mapping = 0;
    zdev->zvm.preload = 0;
    zdev->zvm.migrate = 0;
    zdev->zvm.pagesize = 0;
    zdev->zvm.tlb_miss[0] = 0;
    zdev->zvm.tlb_miss[1] = 0;
    zdev->zvm.tlb_miss[2] = 0;
    zdev->zvm.tlb_miss[3] = 0;
    zdev->zvm.itlb1miss = 0;
    zdev->zvm.dtlb1miss = 0;
    zdev->zvm.itlb2miss = 0;
    zdev->zvm.dtlb2miss = 0;
#endif
    zdev->zvm.pfncnt = 0;
    for (i = 0; i < 50; i++)
        zdev->zvm.pfnbuf[i] = 0;

    zdev->zvm.mmu_pa = NULL;
    zdev->zvm.proc1 = NULL;
    zdev->zvm.proc2 = NULL;
    zdev->procs = NULL;
    return (ZULUVM_SUCCESS);
}

static int
zuluvm_driver_detach(zuluvm_state_t *zdev)
{
    int i;
    cv_destroy(&zdev->intr_wait);
    cv_destroy(&zdev->park_cv);
    mutex_destroy(&zdev->park_lck);
    mutex_destroy(&zdev->proc_lck);
    mutex_destroy(&zdev->dev_lck);
    mutex_destroy(&zdev->load_lck);
    zdev->dops = NULL;

    mutex_enter(&zuluvm_lck);
    for (i = 0; i < ZULUVM_MAX_DEV; i++) {
        if (zuluvm_devtab[i] == zdev) {
            zuluvm_devtab[i] = NULL;
            break;
        }
    }
    mutex_exit(&zuluvm_lck);

    if (zulu_hat_detach((void *)zdev) == 0) {
        return (ZULUVM_SUCCESS);
    } else {
        return (ZULUVM_ERROR);
    }
}

zulud_ops_t *zuluvm_dops = NULL;

/*
 * init the zulu kernel driver (variables, locks, etc)
 */
int
zuluvm_init(zulud_ops_t *ops, int **pagesizes)
{
    int error = ZULUVM_SUCCESS;
    int i;
    int size = zuluvm_base_pgsize; /* MMU_PAGESIZE; */

    if (ops->version != ZULUVM_INTERFACE_VERSION)
        return (ZULUVM_VERSION_MISMATCH);

    zuluvm_dops = ops;
    for (i = 0; i < ZULUM_MAX_PG_SIZES && size <= ZULU_TTE4M; i++) {
        zuluvm_pagesizes[i] = size++;
    }
    zuluvm_pagesizes[i] = -1;
    *pagesizes = zuluvm_pagesizes;

    return (error);
}

/*
 * cleanup afterwards
 */
int
zuluvm_fini(void)
{
    zuluvm_dops = NULL;
    return (ZULUVM_SUCCESS);
}

/*
 * allocate a zulu kernel driver instance for this zulu device
 */
int
zuluvm_alloc_device(dev_info_t *devi, void *arg, zuluvm_info_t *devp,
    caddr_t mmu, caddr_t imr)
{
    uint64_t intr_num;
    zuluvm_state_t *zdev;
    int error = ZULUVM_SUCCESS;

    TNF_PROBE_3(zuluvm_alloc_device, "zuluvm", /* */,
        tnf_opaque, arg, arg,
        tnf_opaque, mmu, mmu,
        tnf_opaque, imr, imr);

    zdev = kmem_zalloc(sizeof (zuluvm_state_t), KM_SLEEP);
    zdev->dip = devi;
    zdev->dops = zuluvm_dops;
    error = zuluvm_driver_attach(zdev);
    if (error != ZULUVM_SUCCESS) {
        kmem_free(zdev, sizeof (zuluvm_state_t));
        return (ZULUVM_NO_DEV);
    }

    ZULUVM_LOCK;
    error = zuluvm_get_intr_props(zdev, devi);
    if (error != ZULUVM_SUCCESS) {
        ZULUVM_UNLOCK;
        error = zuluvm_driver_detach(zdev);
        if (error != ZULUVM_SUCCESS)
            return (error);
        kmem_free(zdev, sizeof (zuluvm_state_t));
        return (ZULUVM_NO_DEV);
    }
    zdev->zvm.arg = arg;
    zdev->zvm.mmu_pa = (uint64_t)va_to_pa((void *)mmu);
    zdev->imr = (uint64_t *)imr;
    zdev->zvm.dmv_intr = dmv_add_softintr(zuluvm_dmv_tlbmiss_tl1,
        (void *)zdev);
    zulud_set_itlb_pc(zdev, arg, DMV_MAKE_DMV(zdev->zvm.dmv_intr,
        (void *)zdev));
    zulud_set_dtlb_pc(zdev, arg, DMV_MAKE_DMV(zdev->zvm.dmv_intr,
        (void *)zdev));
    intr_dist_add(zuluvm_retarget_intr, (void *)zdev);
    zuluvm_do_retarget(zdev);
    intr_num = add_softintr(ZULUVM_PIL, zuluvm_softintr,
        (caddr_t)zdev, SOFTINT_ST);
    zdev->zvm.intr_num = intr_num;
    *devp = (caddr_t)zdev;
    ZULUVM_UNLOCK;
    TNF_PROBE_1(zuluvm_alloc_device_done, "zuluvm", /* */,
        tnf_opaque, devp, *devp);
    return (ZULUVM_SUCCESS);
}

/*
 * free a zulu kernel driver instance
 */
int
zuluvm_free_device(zuluvm_info_t devp)
{
    int error;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

    TNF_PROBE_1(zuluvm_free_device, "zuluvm", /* */,
        tnf_opaque, zdev, zdev);

    if (zdev == NULL)
        return (ZULUVM_NO_DEV);
    ZULUVM_LOCK;
    if (zdev->zvm.arg == NULL) {
        ZULUVM_UNLOCK;
        TNF_PROBE_1(zuluvm_free_device_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_NO_DEV);
        return (ZULUVM_NO_DEV);
    }
    (void) dmv_rem_intr(zdev->zvm.dmv_intr);
    (void) rem_softintr(zdev->zvm.intr_num);
    intr_dist_rem(zuluvm_retarget_intr, (void *)zdev);
    zdev->zvm.arg = NULL;
    ZULUVM_UNLOCK;
    error = zuluvm_driver_detach(zdev);
    if (error != ZULUVM_SUCCESS)
        return (error);
    zdev->dops = NULL;
    kmem_free(zdev, sizeof (zuluvm_state_t));

    TNF_PROBE_0(zuluvm_free_device_done, "zuluvm", /* */);
    return (ZULUVM_SUCCESS);
}

/*
 * find the as in the list of active zulu processes
 * The caller has to hold zdev->proc_lck
 */
static zuluvm_proc_t *
zuluvm_find_proc(zuluvm_state_t *zdev, struct as *asp)
{
    zuluvm_proc_t *p;
    TNF_PROBE_2(zuluvm_find_proc, "zuluvm", /* */,
        tnf_opaque, zdev, zdev,
        tnf_opaque, asp, asp);
    for (p = zdev->procs; p != NULL; p = p->next) {
        if (ZULU_HAT2AS(p->zhat) == asp) {
            TNF_PROBE_1(zuluvm_find_proc_done,
                "zuluvm", /* */, tnf_opaque, proc, p);
            return (p);
        }
    }
    TNF_PROBE_0(zuluvm_find_proc_fail, "zuluvm", /* */);
    return (NULL);
}

void
zuluvm_as_free(struct as *as, void *arg, uint_t events)
{
    zuluvm_proc_t *proc = (zuluvm_proc_t *)arg;
    zuluvm_state_t *zdev = proc->zdev;
    int wait = 0;
    int flag = 0;
    int valid;

    (void) events;

    TNF_PROBE_1(zuluvm_as_free, "zuluvm", /* */,
        tnf_opaque, arg, arg);

    (void) as_delete_callback(as, arg);
    /*
     * if this entry is still valid, then we need to sync
     * with the zuluvm_tlb_handler routine.
     */
    mutex_enter(&zdev->proc_lck);
    valid = proc->valid;
    proc->valid = 0;
    mutex_exit(&zdev->proc_lck);

    if (valid) {
        ZULUVM_LOCK;
        if (proc == zdev->zvm.proc1) {
            flag |= ZULUVM_WAIT_INTR1;
            wait |= ZULUVM_DO_INTR1;
        }
        if (proc == zdev->zvm.proc2) {
            flag |= ZULUVM_WAIT_INTR2;
            wait |= ZULUVM_DO_INTR2;
        }
        if (flag) {
            zdev->intr_flags |= flag;
            /*
             * wait until the tlb miss is resolved
             */
            while (zdev->intr_flags & wait) {
                cv_wait(&zdev->intr_wait, &zdev->dev_lck);
            }
            zdev->intr_flags &= ~flag;
        }
        ZULUVM_UNLOCK;
    }

    if (proc->zhat != NULL) {
        /*
         * prevent any further tlb miss processing for this hat
         */
        zulu_hat_terminate(proc->zhat);
    }

    /*
     * decrement the ref count and free the data
     * if it drops to zero.
     */
    mutex_enter(&zdev->proc_lck);
    (void) zuluvm_proc_release(zdev, proc);
    mutex_exit(&zdev->proc_lck);
}

/*
 * notify zulu vm driver about a new process going to
 * use zulu DMA. Create a zulu_hat.
 */
int
zuluvm_dma_add_proc(zuluvm_info_t devp, uint64_t *cookie)
{
    zuluvm_proc_t *proc;
    int refcnt;
    struct as *asp = ZULUVM_GET_AS;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

    TNF_PROBE_1(zuluvm_dma_add_proc, "zuluvm", /* */,
        tnf_opaque, zdev, zdev);
    mutex_enter(&zdev->proc_lck);
    proc = zuluvm_find_proc(zdev, asp);
    if (proc == NULL) {
        proc = kmem_zalloc(sizeof (zuluvm_proc_t), KM_SLEEP);
        proc->zhat = zulu_hat_proc_attach(asp, zdev);
        if (proc->zhat == NULL) {
            mutex_exit(&zdev->proc_lck);
            kmem_free(proc, sizeof (zuluvm_proc_t));
            TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
                tnf_int, valid, 0,
                tnf_int, error, ZULUVM_ERROR);
            return (ZULUVM_ERROR);
        }
        proc->zdev = zdev;
        proc->valid = 1;
        proc->refcnt = 1;
        proc->next = zdev->procs;
        if (zdev->procs)
            zdev->procs->prev = proc;
        proc->prev = NULL;
        zdev->procs = proc;
        proc->refcnt++;
        (void) as_add_callback(asp, zuluvm_as_free, proc,
            AS_FREE_EVENT, 0, -1, KM_SLEEP);
    } else {
        if (proc->valid == 0) {
            mutex_exit(&zdev->proc_lck);
            TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
                tnf_int, valid, 0,
                tnf_int, error, ZULUVM_ERROR);
            return (ZULUVM_ERROR);
        }
        proc->refcnt++;
    }
    refcnt = proc->refcnt;
    mutex_exit(&zdev->proc_lck);
    *cookie = (uint64_t)proc;
    TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
        tnf_int, refcnt, refcnt,
        tnf_int, error, ZULUVM_SUCCESS);
    return (ZULUVM_SUCCESS);
}

void
zuluvm_proc_hold(zuluvm_state_t *zdev, zuluvm_proc_t *proc)
{
    mutex_enter(&zdev->proc_lck);
    proc->refcnt++;
    mutex_exit(&zdev->proc_lck);
}

/*
 * decrement ref count and free data if it drops to zero
 */
static int
zuluvm_proc_release(zuluvm_state_t *zdev, zuluvm_proc_t *proc)
{
    int refcnt;
    ASSERT(MUTEX_HELD(&zdev->proc_lck));
    refcnt = --proc->refcnt;
    TNF_PROBE_3(zuluvm_proc_release, "zuluvm", /* */,
        tnf_opaque, zdev, zdev,
        tnf_opaque, proc, proc,
        tnf_int, refcnt, refcnt);
    if (refcnt == 0) {
        if (proc->next)
            proc->next->prev = proc->prev;
        if (proc->prev)
            proc->prev->next = proc->next;
        else
            zdev->procs = proc->next;
        kmem_free(proc, sizeof (zuluvm_proc_t));
    }
    return (refcnt);
}

/*
 * this process is no longer using DMA, all entries
 * have been removed from the TLB.
 */
int
zuluvm_dma_delete_proc(zuluvm_info_t devp, uint64_t cookie)
{
    int refcnt;
    zuluvm_proc_t *proc = (zuluvm_proc_t *)cookie;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

    TNF_PROBE_2(zuluvm_dma_delete_proc, "zuluvm", /* */,
        tnf_opaque, zdev, zdev,
        tnf_opaque, cookie, cookie);
    mutex_enter(&zdev->proc_lck);
    if (proc != NULL) {
        TNF_PROBE_1(zuluvm_dma_delete_proc, "zuluvm", /* */,
            tnf_opaque, proc, proc);
        if (proc->zhat != NULL) {
            zulu_hat_proc_detach(proc->zhat);
            proc->zhat = NULL;
        }
        refcnt = zuluvm_proc_release(zdev, proc);
    }
    mutex_exit(&zdev->proc_lck);

    TNF_PROBE_2(zuluvm_dma_delete_proc_done, "zuluvm", /* */,
        tnf_int, refcnt, refcnt,
        tnf_int, error, ZULUVM_SUCCESS);
    return (ZULUVM_SUCCESS);
}

/*
 * barrier sync for device driver
 * blocks until zuluvm_tlbmiss_tl1 function is done
 */
void
zuluvm_fast_tlb_wait(caddr_t devp)
{
    int state;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    int cnt = 0;

    do {
        state = ZULUVM_GET_STATE(zdev);
        cnt++;
    } while (state == ZULUVM_STATE_TLB_PENDING);
    TNF_PROBE_1(zuluvm_fast_tlb_wait, "zuluvm", /* */,
        tnf_int, loop_cnt, cnt);
}

/*
 * setup DMA handling for this handle
 */
int
zuluvm_dma_alloc_ctx(zuluvm_info_t devp, int dma, short *mmuctx,
    uint64_t *tsbreg)
{
    struct as *asp = ZULUVM_GET_AS;
    int error = ZULUVM_NO_DEV;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    int state, newstate;

    if (asp == NULL) {
        TNF_PROBE_1(zuluvm_dma_alloc_ctx_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_NO_HAT);
        return (ZULUVM_NO_HAT);
    }

    *tsbreg = 0;
    state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
        ZULUVM_STATE_STOPPED);
    newstate = ZULUVM_GET_STATE(zdev);
    TNF_PROBE_4(zuluvm_dma_alloc_ctx, "zuluvm", /* */,
        tnf_opaque, devp, devp,
        tnf_int, dma, dma,
        tnf_int, oldstate, state,
        tnf_int, newstate, newstate);
#ifdef DEBUG
    if (zuluvm_debug_state)
        cmn_err(CE_NOTE, "zuluvm_dma_alloc_ctx: state %d\n", state);
#endif
    if (state != ZULUVM_STATE_STOPPED && state != ZULUVM_STATE_IDLE) {
        while (state != ZULUVM_STATE_IDLE) {
            state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
                ZULUVM_STATE_STOPPED);
#ifdef DEBUG
            if (zuluvm_debug_state)
                cmn_err(CE_NOTE, "zuluvm_dma_alloc_ctx: (loop)"
                    " state %d\n", state);
#endif
            if (state != ZULUVM_STATE_IDLE)
                delay(1);
        }
    }

    if (zdev->zvm.arg != NULL) {
        struct zulu_hat *zhat;
        zuluvm_proc_t *proc;

        mutex_enter(&zdev->proc_lck);
        proc = zuluvm_find_proc(zdev, asp);
        if (proc != NULL) {
            zhat = proc->zhat;
            proc->refcnt++;
        }
        mutex_exit(&zdev->proc_lck);

        switch (dma) {
        case ZULUVM_DMA1:
            ZULUVM_LOCK;
            zdev->zvm.proc1 = proc;
            ZULUVM_UNLOCK;
            error = ZULUVM_SUCCESS;
            break;
        case ZULUVM_DMA2:
            ZULUVM_LOCK;
            zdev->zvm.proc2 = proc;
            ZULUVM_UNLOCK;
            error = ZULUVM_SUCCESS;
            break;
        default:
            mutex_enter(&zdev->proc_lck);
            (void) zuluvm_proc_release(zdev, proc);
            mutex_exit(&zdev->proc_lck);
        }

        if (error == ZULUVM_SUCCESS) {
            zulu_hat_validate_ctx(zhat);
            if (zhat->zulu_ctx >= 0) {
                *mmuctx = zhat->zulu_ctx;
            } else {
                printf("invalid context value: %d\n",
                    zhat->zulu_ctx);

                mutex_enter(&zdev->proc_lck);
                (void) zuluvm_proc_release(zdev, proc);
                mutex_exit(&zdev->proc_lck);

                error = ZULUVM_ERROR;
            }
        } else {
            error = ZULUVM_ERROR;
        }
    }
    TNF_PROBE_1(zuluvm_dma_alloc_ctx_done, "zuluvm", /* */,
        tnf_int, error, error);
    return (error);
}

/*
 * preload TLB
 * this will try to pre-set the zulu tlb, mainly used for dma engine 2,
 * video read-back.
 */
int
zuluvm_dma_preload(zuluvm_info_t devp, int dma,
    int num, zulud_preload_t *list)
{
    int i;
    int error = ZULUVM_SUCCESS;
    struct zulu_hat *zhat;
    zuluvm_proc_t *proc = NULL;

    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

    TNF_PROBE_4(zuluvm_dma_preload, "zuluvm", /* */,
        tnf_opaque, devp, devp,
        tnf_int, dma, dma,
        tnf_int, num, num,
        tnf_opaque, list, list);
    ZULUVM_LOCK;
    switch (dma) {
    case ZULUVM_DMA1:
        proc = zdev->zvm.proc1;
        break;
    case ZULUVM_DMA2:
        proc = zdev->zvm.proc2;
        break;
    }

    mutex_enter(&zdev->proc_lck);
    if (proc == NULL || proc->valid == 0 || proc->zhat == NULL) {
        mutex_exit(&zdev->proc_lck);
        ZULUVM_UNLOCK;
        return (ZULUVM_NO_HAT);
    }
    mutex_exit(&zdev->proc_lck);

    zhat = proc->zhat;
    /*
     * need to release this to avoid recursive enter in zuluvm_load_tte
     * which gets called from zulu_hat_memload()
     */
    ZULUVM_UNLOCK;

    mutex_enter(&zdev->load_lck);
    for (i = 0; i < num; i++) {
        int pg_size;
        int res;
        int first = 1;
        caddr_t addr = ZULUVM_GET_PAGE(list[i].addr);
        int64_t size = (int64_t)list[i].len;
        while (size > 0) {
            if (list[i].tlbtype & ~ZULUVM_DMA_MASK) {
                error = ZULUVM_INVALID_MISS;
                break;
            }
            res = zulu_hat_load(zhat, addr,
                (list[i].tlbtype == ZULUVM_DMA2) ? S_WRITE : S_READ,
                &pg_size);
            if ((res != 0) || (pg_size < 0)) {
                error = ZULUVM_NO_MAP;
                break;
            }
            ZULUVM_STATS_PRELOAD(zdev);
            TNF_PROBE_2(zuluvm_dma_preload_addr, "zuluvm", /* */,
                tnf_opaque, addr, addr,
                tnf_opaque, size, size);
            if (first) {
                first = 0;
                size -= ZULU_HAT_PGDIFF(list[i].addr,
                    pg_size);
            } else {
                size -= ZULU_HAT_PGSZ(pg_size);
            }
            addr += ZULU_HAT_PGSZ(pg_size);
        }
    }
    mutex_exit(&zdev->load_lck);
    TNF_PROBE_1(zuluvm_dma_preload_done, "zuluvm", /* */,
        tnf_int, error, error);
    return (ZULUVM_SUCCESS);
}

/*
 * destroy DMA handling for this handle
 */
int
zuluvm_dma_free_ctx(zuluvm_info_t devp, int dma)
{
    int error = ZULUVM_NO_DEV;
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    int state, newstate;

    state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_STOPPED,
        ZULUVM_STATE_IDLE);
    newstate = ZULUVM_GET_STATE(zdev);
    TNF_PROBE_4(zuluvm_dma_free_ctx, "zuluvm", /* */,
        tnf_opaque, devp, devp,
        tnf_int, dma, dma,
        tnf_int, oldstate, state,
        tnf_int, newstate, newstate);
#ifdef DEBUG
    if (zuluvm_debug_state)
        cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: state %d\n", state);
#endif
    if (state != ZULUVM_STATE_IDLE && state != ZULUVM_STATE_STOPPED) {
        int doit = 1;
        while (doit) {
            switch (state) {
            case ZULUVM_STATE_CANCELED:
            case ZULUVM_STATE_STOPPED:
                doit = 0;
                break;
            case ZULUVM_STATE_IDLE:
                state = ZULUVM_SET_STATE(zdev,
                    ZULUVM_STATE_STOPPED,
                    ZULUVM_STATE_IDLE);
                break;
            default:
                state = ZULUVM_SET_STATE(zdev,
                    ZULUVM_STATE_CANCELED, state);
            }
            TNF_PROBE_1(zuluvm_dma_free_ctx, "zuluvm", /* */,
                tnf_int, state, state);
#ifdef DEBUG
            if (zuluvm_debug_state)
                cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: (loop1)"
                    " state %d\n", state);
#endif
        }
    }
    TNF_PROBE_1(zuluvm_dma_free_ctx, "zuluvm", /* */,
        tnf_int, state, state);

    error = ZULUVM_SUCCESS;
    while (state != ZULUVM_STATE_STOPPED) {
        state = ZULUVM_GET_STATE(zdev);
#ifdef DEBUG
        if (zuluvm_debug_state)
            cmn_err(CE_NOTE, "zuluvm_dma_free: (loop2) state %d\n",
                state);
#endif
        if (state != ZULUVM_STATE_STOPPED)
            delay(1);
    }
    ZULUVM_LOCK;
    if (zdev->zvm.arg != NULL) {
        zuluvm_proc_t *proc = NULL;
        switch (dma) {
        case ZULUVM_DMA1:
            proc = zdev->zvm.proc1;
            zdev->zvm.proc1 = NULL;
            break;
        case ZULUVM_DMA2:
            proc = zdev->zvm.proc2;
            zdev->zvm.proc2 = NULL;
            break;
        default:
            error = ZULUVM_NO_DEV;
        }
        ZULUVM_UNLOCK;
        if (proc) {
            mutex_enter(&zdev->proc_lck);
            (void) zuluvm_proc_release(zdev, proc);
            mutex_exit(&zdev->proc_lck);
        }
    } else {
        ZULUVM_UNLOCK;
        error = ZULUVM_NO_DEV;
    }
    TNF_PROBE_1(zuluvm_dma_free_ctx_done, "zuluvm", /* */,
        tnf_int, error, error);
    return (error);
}

static void
zuluvm_do_retarget(zuluvm_state_t *zdev)
{
    int i, idx;
    uint_t cpu;
    for (i = 0; i < ZULUVM_MAX_INTR; i++) {
        if (zdev->interrupts[i].ino != -1) {
            cpu = intr_dist_cpuid();
            idx = zdev->interrupts[i].offset;
            if (zdev->imr[idx] & ZULUVM_IMR_V_MASK)
                zdev->imr[idx] = ZULUVM_IMR_V_MASK |
                    (cpu<<ZULUVM_IMR_TARGET_SHIFT);
            else
                zdev->imr[idx] =
                    cpu<<ZULUVM_IMR_TARGET_SHIFT;
        }
    }
}

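/*
 * interrupt redistribution callback, registered with the interrupt
 * distribution framework via intr_dist_add(); re-evaluates the CPU
 * targets of this device's interrupts under the device lock
 */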
static void
zuluvm_retarget_intr(void *arg)
{
    zuluvm_state_t *zdev = (zuluvm_state_t *)arg;
    ZULUVM_LOCK;
    zuluvm_do_retarget(zdev);
    ZULUVM_UNLOCK;
}

int
zuluvm_add_intr(zuluvm_info_t devp, int ino,
    uint_t (*handler)(caddr_t), caddr_t arg)
{
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    if (devp == NULL) {
        TNF_PROBE_1(zuluvm_add_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_NO_DEV);
        return (ZULUVM_NO_DEV);
    }
    if (ddi_add_intr(zdev->dip, ino, NULL, NULL, handler, arg)
        != DDI_SUCCESS) {
        TNF_PROBE_1(zuluvm_add_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_ERROR);
        return (ZULUVM_ERROR);
    }
    return (ZULUVM_SUCCESS);
}

int
zuluvm_rem_intr(zuluvm_info_t devp, int ino)
{
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
    if (devp == NULL) {
        TNF_PROBE_1(zuluvm_rem_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_NO_DEV);
        return (ZULUVM_NO_DEV);
    }
    /* remove from distribution list */
    ZULUVM_LOCK;
    zdev->imr[zdev->interrupts[ino].offset] &= ~ZULUVM_IMR_V_MASK;
    ZULUVM_UNLOCK;
    ddi_remove_intr(zdev->dip, ino, NULL);
    return (ZULUVM_SUCCESS);
}

int
zuluvm_enable_intr(zuluvm_info_t devp, int num)
{
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

    TNF_PROBE_2(zuluvm_enable_intr, "zuluvm_intr", /* */,
        tnf_opaque, devp, devp,
        tnf_int, num, num);
    if (devp == NULL) {
        TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_NO_DEV);
        return (ZULUVM_NO_DEV);
    }
    if (num < 0 || num > ZULUVM_IMR_MAX) {
        TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_BAD_IDX);
        return (ZULUVM_BAD_IDX);
    }
    ZULUVM_LOCK;
    zdev->imr[num] |= ZULUVM_IMR_V_MASK;
    ZULUVM_UNLOCK;
    TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm_intr", /* */,
        tnf_int, error, ZULUVM_SUCCESS);
    return (ZULUVM_SUCCESS);
}

int
zuluvm_disable_intr(zuluvm_info_t devp, int num)
{
    zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

    TNF_PROBE_2(zuluvm_disable_intr, "zuluvm_intr", /* */,
        tnf_opaque, devp, devp,
        tnf_int, num, num);
    if (devp == NULL) {
        TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_NO_DEV);
        return (ZULUVM_NO_DEV);
    }
    if (num < 0 || num > ZULUVM_IMR_MAX) {
        TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm", /* */,
            tnf_int, error, ZULUVM_BAD_IDX);
        return (ZULUVM_BAD_IDX);
    }
    ZULUVM_LOCK;
    zdev->imr[num] &= ~ZULUVM_IMR_V_MASK;
    ZULUVM_UNLOCK;
    TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm_intr", /* */,
        tnf_int, error, ZULUVM_SUCCESS);
    return (ZULUVM_SUCCESS);
}

static int
zuluvm_get_intr_props(zuluvm_state_t *zdev,
    dev_info_t *devi)
{
    int *intr;
    int i;
    uint_t nintr;

    zdev->agentid = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
        "portid", -1);
    if (zdev->agentid == -1) {
        cmn_err(CE_WARN, "%s%d: no portid property",
            ddi_get_name(devi),
            ddi_get_instance(devi));
        return (ZULUVM_ERROR);
    }

    for (i = 0; i < ZULUVM_MAX_INTR; i++) {
        zdev->interrupts[i].offset = 0;
        zdev->interrupts[i].ino = -1;
    }

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
        "interrupts", &intr, &nintr) == DDI_PROP_SUCCESS) {

        if (nintr == 0) {
            cmn_err(CE_WARN, "%s%d: no interrupts in property",
                ddi_get_name(devi),
                ddi_get_instance(devi));
            ddi_prop_free(intr);
            return (ZULUVM_ERROR);
        }
        if (nintr >= ZULUVM_MAX_INTR) {
            cmn_err(CE_WARN, "%s%d: too many interrupts (%d)",
                ddi_get_name(devi),
                ddi_get_instance(devi), nintr);
            ddi_prop_free(intr);
            return (ZULUVM_ERROR);
        }
        for (i = 0; i < nintr; i++) {
            zdev->interrupts[i].offset = intr[i];
            zdev->interrupts[i].ino = i;
        }
        ddi_prop_free(intr);
    } else {
        cmn_err(CE_WARN, "%s%d: no interrupts property",
            ddi_get_name(devi),
            ddi_get_instance(devi));
    }
    return (ZULUVM_SUCCESS);
}

/* *** end of zulu *** */