/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * zuluvm module
 *
 * Provides services required by the XVR-4000 graphics accelerator (zulu)
 * that are not provided by the ddi. See PSARC 2002/231.
 *
 * Zulu has 2 dma engines with built-in MMUs. zuluvm provides TLB miss
 * interrupt support, obtaining virtual to physical address translations
 * using the XHAT interface (PSARC/2003/517).
 *
 * The module has 3 components: this file, sun4u/vm/zulu_hat.c, and the
 * assembly language routines in sun4u/ml/zulu_asm.s and
 * sun4u/ml/zulu_hat_asm.s.
 *
 * The interrupt handler is a data bearing mondo interrupt handled at TL=1.
 * If no translation is found in the zulu hat's tsb, or if the tsb is locked by
 * C code, the handler posts a soft interrupt which wakes up a parked
 * thread belonging to zuludaemon(1M).
 */
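
/*
 * TLB miss handling, as implemented below:
 *
 * 1. The data bearing mondo interrupt is handled at TL=1 by
 *    zuluvm_dmv_tlbmiss_tl1 (registered in zuluvm_alloc_device(), code in
 *    sun4u/ml/zulu_asm.s), which tries to satisfy the miss from the zulu
 *    hat's tsb.
 * 2. If that fails, a soft interrupt (zuluvm_softintr) signals the daemon
 *    thread parked in zuluvm_park().
 * 3. zuluvm_tlb_handler() resolves the miss with zulu_hat_load(); zulu_hat
 *    calls back into zuluvm_load_tte(), which writes the tte through
 *    zulud_write_tte() and completes the miss with zulud_tlb_done().
 */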

#include <sys/conf.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/intr.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/machsystm.h>
#include <sys/ivintr.h>
#include <sys/tnf_probe.h>
#include <sys/intreg.h>
#include <sys/atomic.h>
#include <vm/as.h>
#include <vm/seg_enum.h>
#include <vm/faultcode.h>
#include <sys/dmv.h>
#include <sys/zulumod.h>
#include <sys/zulu_hat.h>

#define	ZULUVM_GET_PAGE(val) \
	(caddr_t)((uintptr_t)(val) & PAGEMASK)
#define	ZULUVM_GET_AS	curthread->t_procp->p_as

#define	ZULUVM_LOCK	mutex_enter(&(zdev->dev_lck))
#define	ZULUVM_UNLOCK	mutex_exit(&(zdev->dev_lck))

#define	ZULUVM_SET_STATE(_z, b, c) \
	cas32((uint32_t *)&((_z)->zvm.state), c, b)
#define	ZULUVM_GET_STATE(_z) \
	(_z)->zvm.state
#define	ZULUVM_SET_IDLE(_z) \
	(_z)->zvm.state = ZULUVM_STATE_IDLE;

#define	ZULUVM_INO_MASK	((1<<INO_SIZE)-1)
#define	ZULUVM_IGN_MASK	((1<<IGN_SIZE)-1)
#define	ZULUVM_MONDO(_zdev, _n) \
	((ZULUVM_IGN_MASK & _zdev->agentid) << INO_SIZE) | \
	(ZULUVM_INO_MASK & (_n))

static void zuluvm_stop(zuluvm_state_t *, int, char *);
static zuluvm_proc_t *zuluvm_find_proc(zuluvm_state_t *, struct as *);
static int zuluvm_proc_release(zuluvm_state_t *zdev, zuluvm_proc_t *proc);
static int zuluvm_get_intr_props(zuluvm_state_t *zdev, dev_info_t *devi);
static int zuluvm_driver_attach(zuluvm_state_t *);
static int zuluvm_driver_detach(zuluvm_state_t *);
static void zuluvm_retarget_intr(void *arg);
static void zuluvm_do_retarget(zuluvm_state_t *zdev);

extern const unsigned int _mmu_pageshift;

extern int zuluvm_base_pgsize;
static int zuluvm_pagesizes[ZULUM_MAX_PG_SIZES + 1];

int zuluvm_fast_tlb = 1;

zuluvm_state_t *zuluvm_devtab[ZULUVM_MAX_DEV];
kmutex_t zuluvm_lck;

#ifdef DEBUG
int zuluvm_debug_state = 0;
#endif

unsigned long zuluvm_ctx_locked = 0;
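
/*
 * Device state as used by the handlers below.  The state word is advanced
 * with compare-and-swap (ZULUVM_SET_STATE) so the fast TL=1 handler, the
 * soft interrupt and the driver entry points can detect a cancel.  The
 * transitions visible in this file are roughly:
 *
 *	STOPPED -> IDLE			zuluvm_dma_alloc_ctx()
 *	IDLE -> TLB_PENDING/INTR_QUEUED	fast miss handler, before posting the
 *					soft interrupt (assumed; the handler
 *					lives in sun4u/ml/zulu_asm.s)
 *	INTR_QUEUED -> INTR_PENDING	zuluvm_tlb_handler()
 *	INTR_PENDING -> WRITE_TTE	zuluvm_write_tte()
 *	WRITE_TTE -> IDLE		miss resolved
 *	any -> STOPPED/CANCELED		zuluvm_stop(), zuluvm_dma_free_ctx()
 */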

/*
 * Module linkage information for the kernel.
 */
extern struct mod_ops mod_miscops;

static struct modlmisc modlmisc = {
	&mod_miscops,
	"sun4u support " ZULUVM_MOD_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

int
_init(void)
{
	zuluvm_base_pgsize = (_mmu_pageshift - 13) / 3;
	if (zulu_hat_init() != 0) {
		return (ZULUVM_ERROR);
	}
	mutex_init(&zuluvm_lck, NULL, MUTEX_DEFAULT, NULL);
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	mutex_destroy(&zuluvm_lck);
	(void) zulu_hat_destroy();
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * currently the kernel driver makes the following assumptions:
 * - there is only one TLB miss per zulu device handled at
 *   any given time
 *   ==> we only need local data storage per device, not per DMA
 *   ==> a page fault will block the DMA engine until the fault
 *       is resolved
 *   ==> a pagefault will not trigger a zulu DMA context switch
 *
 * If we want to implement asynchronous zulu page faults, then we
 * need to keep track of outstanding faults while zulu DMA runs
 * in a different context.
 */
static int
zuluvm_write_tte(zuluvm_state_t *zdev, void *arg, caddr_t addr,
		int t_pfn, int t_perm, int t_size, uint64_t tag,
		int tlbtype, int *size)
{
	int error;

	(void) addr;

	ZULUVM_STATS_MISS(zdev, t_size);

	if (tag == 0) { /* not coming from preload */
		int state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_WRITE_TTE,
		    ZULUVM_STATE_INTR_PENDING);
		if (state != ZULUVM_STATE_INTR_PENDING) {
			zuluvm_stop(zdev, state, "zuluvm_write_tte");
			return (ZULUVM_MISS_CANCELED);
		}
	}

	if (!(tlbtype & ZULUVM_ITLB_FLAG) &&
	    t_size != zuluvm_base_pgsize &&
	    t_size != ZULU_TTE4M) {
		t_size = zuluvm_base_pgsize;
		TNF_PROBE_2(zuluvm_write_tte_new_pfn, "zuluvm", /* */,
		    tnf_opaque, t_pfn, t_pfn, tnf_int, pagesize, t_size);
	}
	TNF_PROBE_1(zuluvm_write_tte, "zuluvm", /* */,
	    tnf_opaque, t_pfn, t_pfn);
	/*
	 * if the caller is zuluvm_preload, then we need to pass
	 * back the page size so it can add the right offset.
	 */
	if (size)
		*size = t_size;

	error = zulud_write_tte(zdev, arg, t_size, tag, t_pfn,
	    t_perm, tlbtype);

	return (error);
}

static void
zuluvm_stop(zuluvm_state_t *zdev, int state, char *tag)
{
	int ostate = state;
	while (state != ZULUVM_STATE_STOPPED) {
		state = ZULUVM_SET_STATE(zdev,
		    ZULUVM_STATE_STOPPED, state);
#ifdef DEBUG
		if (zuluvm_debug_state)
			cmn_err(CE_NOTE, "zuluvm_stop(%s): (loop) state %d\n",
			    tag, state);
#endif
	}
	TNF_PROBE_2(zuluvm_stop, "zuluvm", /* */,
	    tnf_string, tag, tag,
	    tnf_int, state, ostate);
	ZULUVM_STATS_CANCEL(zdev);
}

/*
 * Executed in the context of the parked zulu daemon thread;
 * uses zulu_hat_load to resolve the miss.
 * The tte is loaded and the miss completed by zuluvm_load_tte,
 * which is called back from zulu_hat.
 *
 * This function is synchronized with zuluvm_as_free.
 * zuluvm_as_free will block until miss servicing is complete.
 *
 * There is a race condition between as_free and the zulu tlb miss
 * soft interrupt:
 *	- queue zulu interrupt
 *	- process dies, as_free runs
 *	- interrupt gets scheduled and runs as_fault on the
 *	  already freed as.
 * This is solved by keeping track of current zulu dma processes
 * and invalidating them in zuluvm_as_free.
 */
uint_t
zuluvm_tlb_handler(caddr_t data)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)data;
	int error;
	int flag = 0;
	int wait = 0;
	zuluvm_proc_t *proc = NULL;
	struct zulu_hat *zhat = NULL;
	caddr_t addr;
	int tlbtype;
	void *arg;
	int state, newstate;

	TNF_PROBE_1(zuluvm_tlb_handler_lwp, "zuluvm", /* */,
	    tnf_opaque, lwp, ttolwp(curthread));

	ZULUVM_LOCK;
	error = ZULUVM_GET_TLB_ERRCODE(zdev);
	addr = (caddr_t)ZULUVM_GET_TLB_ADDR(zdev);
	tlbtype = ZULUVM_GET_TLB_TYPE(zdev);
	arg = zdev->zvm.arg;

	/*
	 * select the correct dma engine and remember the
	 * as_free synchronization flags.
	 */
	switch (tlbtype) {
	case ZULUVM_ITLB1:
	case ZULUVM_DMA1:
		proc = zdev->zvm.proc1;
		flag |= ZULUVM_DO_INTR1;
		wait |= ZULUVM_WAIT_INTR1;
		break;
	case ZULUVM_ITLB2:
	case ZULUVM_DMA2:
		proc = zdev->zvm.proc2;
		flag |= ZULUVM_DO_INTR2;
		wait |= ZULUVM_WAIT_INTR2;
		break;
	}

	state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_INTR_PENDING,
	    ZULUVM_STATE_INTR_QUEUED);
	newstate = ZULUVM_GET_STATE(zdev);

	TNF_PROBE_2(zuluvm_tlb_handler_state, "zuluvm", /* */,
	    tnf_int, oldstate, state,
	    tnf_int, newstate, newstate);
#ifdef DEBUG
	if (zuluvm_debug_state)
		cmn_err(CE_NOTE, "zuluvm_tlb_handler: state %d\n", state);
#endif
	if (state != ZULUVM_STATE_INTR_PENDING &&
	    state != ZULUVM_STATE_INTR_QUEUED) {
		ZULUVM_UNLOCK;

		zuluvm_stop(zdev, state, "softintr1");
		zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_MISS_CANCELED);
		return (1);
	}

	/*
	 * block the as_free callback in case it comes in
	 */
	zdev->intr_flags |= flag;
	ZULUVM_UNLOCK;

	mutex_enter(&zdev->proc_lck);
	/*
	 * check if this as is still valid
	 */
	if (proc == NULL || proc->valid == 0 || proc->zhat == NULL) {
		mutex_exit(&zdev->proc_lck);
		/*
		 * we are on our way out, wake up the as_free
		 * callback if it is waiting for us
		 */
		ZULUVM_LOCK;
		zdev->intr_flags &= ~flag;
		if (zdev->intr_flags & wait)
			cv_broadcast(&zdev->intr_wait);
		ZULUVM_UNLOCK;
		state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
		    ZULUVM_STATE_INTR_PENDING);
		if (state != ZULUVM_STATE_INTR_PENDING) {
			zuluvm_stop(zdev, state, "softintr3");
		}
		zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_NO_HAT);
		return (1);
	}
	zhat = proc->zhat;
	mutex_exit(&zdev->proc_lck);

	TNF_PROBE_1(zuluvm_tlb_handler, "zuluvm", /* */,
	    tnf_opaque, addr, addr);

	switch (error) {
	case ZULUVM_CTX_LOCKED:
		/*
		 * The trap handler found that zulu_hat had the lock bit set.
		 * Rather than block in the fast trap handler, it punts
		 * in this rare instance.
		 */
		++zuluvm_ctx_locked;
		TNF_PROBE_1(zuluvm_ctx_locked, "zuluvm", /* CSTYLED */,
		    tnf_ulong, zuluvm_ctx_locked, zuluvm_ctx_locked);

		/*FALLTHROUGH*/

	case ZULUVM_TTE_DELAY:
		/*
		 * fast tlb handler was skipped, see zuluvm_fast_tlb flag
		 */
		/*FALLTHROUGH*/

	case ZULUVM_NO_TTE:
		/*
		 * no TSB entry and no TTE in the hash
		 */
		mutex_enter(&zdev->load_lck);
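		/*
		 * Resolve the miss while holding load_lck with in_intr set,
		 * so that the zulu_hat_load() callback into zuluvm_load_tte()
		 * takes the interrupt path: it writes the tte, does the
		 * as_free synchronization and calls zulud_tlb_done(), which
		 * is why the success case below returns right away.
		 */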
		zdev->in_intr = 1;
		error = zulu_hat_load(zhat, addr,
		    (tlbtype == ZULUVM_DMA2) ? S_WRITE : S_READ, NULL);
		zdev->in_intr = 0;
		mutex_exit(&zdev->load_lck);
		if (error) {
			error = ZULUVM_NO_MAP;
		} else {
			error = ZULUVM_SUCCESS;
			TNF_PROBE_1(zuluvm_tlb_handler_done, "zuluvm", /* */,
			    tnf_int, error, error);
			return (1);
		}

	default:
		/*
		 * error case, fall through and tell zulu driver to abort DMA
		 */
		break;
	}

	if (error != ZULUVM_MISS_CANCELED) {
		state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
		    ZULUVM_STATE_WRITE_TTE);
		newstate = ZULUVM_GET_STATE(zdev);
		TNF_PROBE_2(zuluvm_tlb_handler_state_done, "zuluvm", /* */,
		    tnf_int, oldstate, state,
		    tnf_int, newstate, newstate);
		if (state != ZULUVM_STATE_WRITE_TTE) {
			zuluvm_stop(zdev, state, "softintr4");
		}
	}
	/*
	 * synchronize with as_free callback
	 * It will set the wait flag, in that case we send
	 * a wake up.
	 */
	ZULUVM_LOCK;
	zdev->intr_flags &= ~flag;
	if (zdev->intr_flags & wait)
		cv_broadcast(&zdev->intr_wait);
	ZULUVM_UNLOCK;

	TNF_PROBE_1(zuluvm_tlb_handler_done, "zuluvm", /* */,
	    tnf_int, error, error);

	zulud_tlb_done(zdev, arg, tlbtype, error);

	return (1);
}

void
zuluvm_load_tte(struct zulu_hat *zhat, caddr_t addr, uint64_t pfn,
		int perm, int size)
{
	zuluvm_state_t *zdev = zhat->zdev;
	int tlbtype = ZULUVM_GET_TLB_TYPE(zdev);

	ASSERT(MUTEX_HELD(&zdev->load_lck));
	ASSERT(pfn != 0);

	if (zdev->in_intr) {
		int error;
		int flag = 0;
		int wait = 0;

		error = zuluvm_write_tte(zdev, zdev->zvm.arg, addr, pfn,
		    perm, size, 0, tlbtype, NULL);

		if (error != ZULUVM_MISS_CANCELED) {
			int state, newstate;

			state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
			    ZULUVM_STATE_WRITE_TTE);
			newstate = ZULUVM_GET_STATE(zdev);
			TNF_PROBE_2(zuluvm_tlb_handler_state_done, "zuluvm",
			    /* */, tnf_int, oldstate, state,
			    tnf_int, newstate, newstate);
			if (state != ZULUVM_STATE_WRITE_TTE) {
				zuluvm_stop(zdev, state, "softintr4");
			}
		}
		/*
		 * synchronize with as_free callback
		 * It will set the wait flag, in that case we send
		 * a wake up.
		 */
		switch (tlbtype) {
		case ZULUVM_ITLB1:
		case ZULUVM_DMA1:
			flag = ZULUVM_DO_INTR1;
			wait = ZULUVM_WAIT_INTR1;
			break;
		case ZULUVM_ITLB2:
		case ZULUVM_DMA2:
			flag = ZULUVM_DO_INTR2;
			wait = ZULUVM_WAIT_INTR2;
			break;
		}

		ZULUVM_LOCK;
		zdev->intr_flags &= ~flag;
		if (zdev->intr_flags & wait)
			cv_broadcast(&zdev->intr_wait);
		ZULUVM_UNLOCK;

		zulud_tlb_done(zdev, zdev->zvm.arg, tlbtype, error);
	} else {
		(void) zuluvm_write_tte(zdev, zdev->zvm.arg, addr, pfn,
		    perm, size, (uint64_t)addr | zhat->zulu_ctx,
		    tlbtype, NULL);
	}
}

/*
 * This function provides the faulting thread for zulu page faults.
 * It is called from the device driver in response to an ioctl issued
 * by a zuludaemon thread.
 * It sits in cv_wait_sig until it gets woken up by a signal or the
 * zulu tlb miss soft interrupt.
 */
int
zuluvm_park(zuluvm_info_t devp)
{
	int rval;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	mutex_enter(&zdev->park_lck);
	zdev->parking = 1;
	for (;;) {
		rval = cv_wait_sig(&zdev->park_cv, &zdev->park_lck);
		if (rval == 0)
			break;
		rval = zuluvm_tlb_handler(devp);
	}
	zdev->parking = 0;
	mutex_exit(&zdev->park_lck);
	return (rval);
}

/*
 * zulu soft interrupt handler, just triggers the parked zulu fault
 * thread
 */
/*ARGSUSED*/
uint_t
zuluvm_softintr(caddr_t devp, caddr_t arg2)
{
	int tlbtype;
	void *arg;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	mutex_enter(&zdev->park_lck);
	if (zdev->parking) {
		cv_signal(&zdev->park_cv);
		mutex_exit(&zdev->park_lck);
		TNF_PROBE_1(zuluvm_fast_intr, "zuluvm", /* */,
		    tnf_opaque, devp, devp);
	} else {
		mutex_exit(&zdev->park_lck);
		cmn_err(CE_NOTE, "zuluvm: no page fault thread\n");
		ZULUVM_LOCK;
		tlbtype = ZULUVM_GET_TLB_TYPE(zdev);
		arg = zdev->zvm.arg;
		ZULUVM_UNLOCK;
		TNF_PROBE_0(zuluvm_fast_intr, "zuluvm", /* */);
		zuluvm_stop(zdev, ZULUVM_STATE_INTR_QUEUED, "fast_intr");
		zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_NO_TTE);
	}
	return (1);
}

/* ***** public interface for process mapping events (hat layer) ***** */

/*
 * If the page size matches the Zulu page sizes then just pass
 * it thru. If not then emulate the page demap with demaps of
 * smaller page size.
 */
/* ARGSUSED */
void
zuluvm_demap_page(void *arg, struct hat *hat_ptr, short ctx,
		caddr_t vaddr, uint_t size)
{
	void *ddarg;
	zuluvm_state_t *zdev = (zuluvm_state_t *)arg;

	if (arg == NULL)
		return;

	ZULUVM_STATS_DEMAP_PAGE(zdev);

	ddarg = zdev->zvm.arg;

	TNF_PROBE_3(zuluvm_demap_page, "zuluvm", /* */,
	    tnf_opaque, addr, vaddr,
	    tnf_int, size, size,
	    tnf_int, ctx, ctx);

	if (ddarg != NULL) {
		if (size != zuluvm_base_pgsize &&
		    size != ZULU_TTE4M) {
			int i;
			int cnt = size - zuluvm_base_pgsize;
			cnt = ZULU_HAT_SZ_SHIFT(cnt);
			for (i = 0; i < cnt; i++) {
				uintptr_t addr = (uintptr_t)vaddr |
				    i << ZULU_HAT_BP_SHIFT;
				zulud_demap_page(zdev, ddarg,
				    (caddr_t)addr, ctx);
			}
		} else {
			zulud_demap_page(zdev, ddarg, vaddr, ctx);
		}
		TNF_PROBE_0(zuluvm_demap_page_done, "zuluvm", /* */);
	} else {
		TNF_PROBE_0(zuluvm_demap_page_null_ddarg, "zuluvm", /* */);
	}
}

/*
 * An entire context has gone away, just pass it thru
 */
void
zuluvm_demap_ctx(void *arg, short ctx)
{
	void *ddarg;
	zuluvm_state_t *zdev = (zuluvm_state_t *)arg;

	if (arg == NULL)
		return;

	ZULUVM_STATS_DEMAP_CTX(zdev);

	TNF_PROBE_1(zuluvm_demap_ctx, "zuluvm", /* */,
	    tnf_int, ctx, ctx);
	ddarg = zdev->zvm.arg;

	if (ddarg != NULL)
		zulud_demap_ctx(zdev, ddarg, ctx);
}

static int
zuluvm_driver_attach(zuluvm_state_t *zdev)
{
	int i;
	mutex_enter(&zuluvm_lck);
	for (i = 0; i < ZULUVM_MAX_DEV; i++) {
		if (zuluvm_devtab[i] == NULL) {
			zuluvm_devtab[i] = zdev;
			ZULUVM_SET_IDLE(zdev);
			break;
		}
	}
	mutex_exit(&zuluvm_lck);
	if (i >= ZULUVM_MAX_DEV)
		return (ZULUVM_ERROR);

	if (zulu_hat_attach((void *)zdev) != 0) {
		return (ZULUVM_ERROR);
	}

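	/*
	 * Locks used by this driver:
	 *	dev_lck  - protects the per-device zvm state
	 *		   (taken via ZULUVM_LOCK/ZULUVM_UNLOCK)
	 *	load_lck - serializes tte loads and protects in_intr
	 *	proc_lck - protects the process list and reference counts
	 *	park_lck - protects 'parking' and the parked thread's cv
	 */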
	mutex_init(&zdev->dev_lck, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zdev->load_lck, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zdev->proc_lck, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zdev->park_lck, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zdev->park_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&zdev->intr_wait, NULL, CV_DEFAULT, NULL);
	zdev->parking = 0;

#ifdef ZULUVM_STATS
	zdev->zvm.cancel = 0;
	zdev->zvm.pagefault = 0;
	zdev->zvm.no_mapping = 0;
	zdev->zvm.preload = 0;
	zdev->zvm.migrate = 0;
	zdev->zvm.pagesize = 0;
	zdev->zvm.tlb_miss[0] = 0;
	zdev->zvm.tlb_miss[1] = 0;
	zdev->zvm.tlb_miss[2] = 0;
	zdev->zvm.tlb_miss[3] = 0;
	zdev->zvm.itlb1miss = 0;
	zdev->zvm.dtlb1miss = 0;
	zdev->zvm.itlb2miss = 0;
	zdev->zvm.dtlb2miss = 0;
#endif
	zdev->zvm.pfncnt = 0;
	for (i = 0; i < 50; i++)
		zdev->zvm.pfnbuf[i] = 0;

	zdev->zvm.mmu_pa = NULL;
	zdev->zvm.proc1 = NULL;
	zdev->zvm.proc2 = NULL;
	zdev->procs = NULL;
	return (ZULUVM_SUCCESS);
}

static int
zuluvm_driver_detach(zuluvm_state_t *zdev)
{
	int i;
	cv_destroy(&zdev->intr_wait);
	cv_destroy(&zdev->park_cv);
	mutex_destroy(&zdev->park_lck);
	mutex_destroy(&zdev->proc_lck);
	mutex_destroy(&zdev->dev_lck);
	mutex_destroy(&zdev->load_lck);
	zdev->dops = NULL;

	mutex_enter(&zuluvm_lck);
	for (i = 0; i < ZULUVM_MAX_DEV; i++) {
		if (zuluvm_devtab[i] == zdev) {
			zuluvm_devtab[i] = NULL;
			break;
		}
	}
	mutex_exit(&zuluvm_lck);

	if (zulu_hat_detach((void *)zdev) == 0) {
		return (ZULUVM_SUCCESS);
	} else {
		return (ZULUVM_ERROR);
	}
}

zulud_ops_t *zuluvm_dops = NULL;

/*
 * init the zulu kernel driver (variables, locks, etc)
 */
int
zuluvm_init(zulud_ops_t *ops, int **pagesizes)
{
	int error = ZULUVM_SUCCESS;
	int i;
	int size = zuluvm_base_pgsize; /* MMU_PAGESIZE; */

	if (ops->version != ZULUVM_INTERFACE_VERSION)
		return (ZULUVM_VERSION_MISMATCH);

	zuluvm_dops = ops;
	for (i = 0; i < ZULUM_MAX_PG_SIZES && size <= ZULU_TTE4M; i++) {
		zuluvm_pagesizes[i] = size++;
	}
	zuluvm_pagesizes[i] = -1;
	*pagesizes = zuluvm_pagesizes;

	return (error);
}

/*
 * cleanup afterwards
 */
int
zuluvm_fini(void)
{
	zuluvm_dops = NULL;
	return (ZULUVM_SUCCESS);
}

/*
 * allocate a zulu kernel driver instance for this zulu device
 */
int
zuluvm_alloc_device(dev_info_t *devi, void *arg, zuluvm_info_t *devp,
		caddr_t mmu, caddr_t imr)
{
	uint64_t intr_num;
	zuluvm_state_t *zdev;
	int error = ZULUVM_SUCCESS;

	TNF_PROBE_3(zuluvm_alloc_device, "zuluvm", /* */,
	    tnf_opaque, arg, arg,
	    tnf_opaque, mmu, mmu,
	    tnf_opaque, imr, imr);

	zdev = kmem_zalloc(sizeof (zuluvm_state_t), KM_SLEEP);
	zdev->dip = devi;
	zdev->dops = zuluvm_dops;
	error = zuluvm_driver_attach(zdev);
	if (error != ZULUVM_SUCCESS) {
		kmem_free(zdev, sizeof (zuluvm_state_t));
		return (ZULUVM_NO_DEV);
	}

	ZULUVM_LOCK;
	error = zuluvm_get_intr_props(zdev, devi);
	if (error != ZULUVM_SUCCESS) {
		ZULUVM_UNLOCK;
		error = zuluvm_driver_detach(zdev);
		if (error != ZULUVM_SUCCESS)
			return (error);
		kmem_free(zdev, sizeof (zuluvm_state_t));
		return (ZULUVM_NO_DEV);
	}
	zdev->zvm.arg = arg;
	zdev->zvm.mmu_pa = (uint64_t)va_to_pa((void *)mmu);
	zdev->imr = (uint64_t *)imr;
	zdev->zvm.dmv_intr = dmv_add_softintr(zuluvm_dmv_tlbmiss_tl1,
	    (void *)zdev);
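	/*
	 * Point the zulu itlb and dtlb miss vectors at the TL=1 mondo
	 * handler registered above.
	 */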
	zulud_set_itlb_pc(zdev, arg, DMV_MAKE_DMV(zdev->zvm.dmv_intr,
	    (void *)zdev));
	zulud_set_dtlb_pc(zdev, arg, DMV_MAKE_DMV(zdev->zvm.dmv_intr,
	    (void *)zdev));
	intr_dist_add(zuluvm_retarget_intr, (void *)zdev);
	zuluvm_do_retarget(zdev);
	intr_num = add_softintr(ZULUVM_PIL, zuluvm_softintr,
	    (caddr_t)zdev, SOFTINT_ST);
	zdev->zvm.intr_num = intr_num;
	*devp = (caddr_t)zdev;
	ZULUVM_UNLOCK;
	TNF_PROBE_1(zuluvm_alloc_device_done, "zuluvm", /* */,
	    tnf_opaque, devp, *devp);
	return (ZULUVM_SUCCESS);
}

/*
 * free a zulu kernel driver instance
 */
int
zuluvm_free_device(zuluvm_info_t devp)
{
	int error;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_1(zuluvm_free_device, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev);

	if (zdev == NULL)
		return (ZULUVM_NO_DEV);
	ZULUVM_LOCK;
	if (zdev->zvm.arg == NULL) {
		ZULUVM_UNLOCK;
		TNF_PROBE_1(zuluvm_free_device_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	(void) dmv_rem_intr(zdev->zvm.dmv_intr);
	(void) rem_softintr(zdev->zvm.intr_num);
	intr_dist_rem(zuluvm_retarget_intr, (void *)zdev);
	zdev->zvm.arg = NULL;
	ZULUVM_UNLOCK;
	error = zuluvm_driver_detach(zdev);
	if (error != ZULUVM_SUCCESS)
		return (error);
	zdev->dops = NULL;
	kmem_free(zdev, sizeof (zuluvm_state_t));

	TNF_PROBE_0(zuluvm_free_device_done, "zuluvm", /* */);
	return (ZULUVM_SUCCESS);
}

/*
 * find the as in the list of active zulu processes
 * The caller has to hold zdev->proc_lck
 */
static zuluvm_proc_t *
zuluvm_find_proc(zuluvm_state_t *zdev, struct as *asp)
{
	zuluvm_proc_t *p;
	TNF_PROBE_2(zuluvm_find_proc, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev,
	    tnf_opaque, asp, asp);
	for (p = zdev->procs; p != NULL; p = p->next) {
		if (ZULU_HAT2AS(p->zhat) == asp) {
			TNF_PROBE_1(zuluvm_find_proc_done,
			    "zuluvm", /* */, tnf_opaque, proc, p);
			return (p);
		}
	}
	TNF_PROBE_0(zuluvm_find_proc_fail, "zuluvm", /* */);
	return (NULL);
}

void
zuluvm_as_free(struct as *as, void *arg, uint_t events)
{
	zuluvm_proc_t *proc = (zuluvm_proc_t *)arg;
	zuluvm_state_t *zdev = proc->zdev;
	int wait = 0;
	int flag = 0;
	int valid;

	(void) events;

	TNF_PROBE_1(zuluvm_as_free, "zuluvm", /* */,
	    tnf_opaque, arg, arg);

	(void) as_delete_callback(as, arg);
	/*
	 * if this entry is still valid, then we need to sync
	 * with the zuluvm_tlb_handler routine.
	 */
	mutex_enter(&zdev->proc_lck);
	valid = proc->valid;
	proc->valid = 0;
	mutex_exit(&zdev->proc_lck);

	if (valid) {
		ZULUVM_LOCK;
		if (proc == zdev->zvm.proc1) {
			flag |= ZULUVM_WAIT_INTR1;
			wait |= ZULUVM_DO_INTR1;
		}
		if (proc == zdev->zvm.proc2) {
			flag |= ZULUVM_WAIT_INTR2;
			wait |= ZULUVM_DO_INTR2;
		}
		if (flag) {
			zdev->intr_flags |= flag;
			/*
			 * wait until the tlb miss is resolved
			 */
			while (zdev->intr_flags & wait) {
				cv_wait(&zdev->intr_wait, &zdev->dev_lck);
			}
			zdev->intr_flags &= ~flag;
		}
		ZULUVM_UNLOCK;
	}

	if (proc->zhat != NULL) {
		/*
		 * prevent any further tlb miss processing for this hat
		 */
		zulu_hat_terminate(proc->zhat);
	}

	/*
	 * decrement the ref count and do the appropriate
	 * cleanup if it drops to zero.
	 */
	mutex_enter(&zdev->proc_lck);
	(void) zuluvm_proc_release(zdev, proc);
	mutex_exit(&zdev->proc_lck);
}
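
/*
 * zuluvm_proc_t reference counting, as implemented below:
 * zuluvm_dma_add_proc() returns holding two references for a new process
 * (one for the caller's cookie, one for the as_free callback) and one more
 * for each repeat call.  zuluvm_dma_alloc_ctx() adds a reference while a DMA
 * engine is bound to the process; zuluvm_dma_free_ctx() drops it.
 * zuluvm_dma_delete_proc() and zuluvm_as_free() drop one reference each, and
 * zuluvm_proc_release() frees the entry when the count reaches zero.
 */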

/*
 * notify zulu vm driver about a new process going to
 * use zulu DMA. Create a zulu_hat.
 */
int
zuluvm_dma_add_proc(zuluvm_info_t devp, uint64_t *cookie)
{
	zuluvm_proc_t *proc;
	int refcnt;
	struct as *asp = ZULUVM_GET_AS;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_1(zuluvm_dma_add_proc, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev);
	mutex_enter(&zdev->proc_lck);
	proc = zuluvm_find_proc(zdev, asp);
	if (proc == NULL) {
		proc = kmem_zalloc(sizeof (zuluvm_proc_t), KM_SLEEP);
		proc->zhat = zulu_hat_proc_attach(asp, zdev);
		if (proc->zhat == NULL) {
			mutex_exit(&zdev->proc_lck);
			kmem_free(proc, sizeof (zuluvm_proc_t));
			TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
			    tnf_int, valid, 0,
			    tnf_int, error, ZULUVM_ERROR);
			return (ZULUVM_ERROR);
		}
		proc->zdev = zdev;
		proc->valid = 1;
		proc->refcnt = 1;
		proc->next = zdev->procs;
		if (zdev->procs)
			zdev->procs->prev = proc;
		proc->prev = NULL;
		zdev->procs = proc;
		proc->refcnt++;
		(void) as_add_callback(asp, zuluvm_as_free, proc,
		    AS_FREE_EVENT, 0, -1, KM_SLEEP);
	} else {
		if (proc->valid == 0) {
			mutex_exit(&zdev->proc_lck);
			TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
			    tnf_int, valid, 0,
			    tnf_int, error, ZULUVM_ERROR);
			return (ZULUVM_ERROR);
		}
		proc->refcnt++;
	}
	refcnt = proc->refcnt;
	mutex_exit(&zdev->proc_lck);
	*cookie = (uint64_t)proc;
	TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
	    tnf_int, refcnt, refcnt,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

void
zuluvm_proc_hold(zuluvm_state_t *zdev, zuluvm_proc_t *proc)
{
	mutex_enter(&zdev->proc_lck);
	proc->refcnt++;
	mutex_exit(&zdev->proc_lck);
}

/*
 * decrement ref count and free data if it drops to zero
 */
static int
zuluvm_proc_release(zuluvm_state_t *zdev, zuluvm_proc_t *proc)
{
	int refcnt;
	ASSERT(MUTEX_HELD(&zdev->proc_lck));
	refcnt = --proc->refcnt;
	TNF_PROBE_3(zuluvm_proc_release, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev,
	    tnf_opaque, proc, proc,
	    tnf_int, refcnt, refcnt);
	if (refcnt == 0) {
		if (proc->next)
			proc->next->prev = proc->prev;
		if (proc->prev)
			proc->prev->next = proc->next;
		else
			zdev->procs = proc->next;
		kmem_free(proc, sizeof (zuluvm_proc_t));
	}
	return (refcnt);
}

/*
 * this process is no longer using DMA, all entries
 * have been removed from the TLB.
 */
int
zuluvm_dma_delete_proc(zuluvm_info_t devp, uint64_t cookie)
{
	int refcnt = 0;
	zuluvm_proc_t *proc = (zuluvm_proc_t *)cookie;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_2(zuluvm_dma_delete_proc, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev,
	    tnf_opaque, cookie, cookie);
	mutex_enter(&zdev->proc_lck);
	if (proc != NULL) {
		TNF_PROBE_1(zuluvm_dma_delete_proc, "zuluvm", /* */,
		    tnf_opaque, proc, proc);
		if (proc->zhat != NULL) {
			zulu_hat_proc_detach(proc->zhat);
			proc->zhat = NULL;
		}
		refcnt = zuluvm_proc_release(zdev, proc);
	}
	mutex_exit(&zdev->proc_lck);

	TNF_PROBE_2(zuluvm_dma_delete_proc_done, "zuluvm", /* */,
	    tnf_int, refcnt, refcnt,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

/*
 * barrier sync for device driver
 * blocks until the zuluvm_dmv_tlbmiss_tl1 handler is done
 */
void
zuluvm_fast_tlb_wait(caddr_t devp)
{
	int state;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	int cnt = 0;

	do {
		state = ZULUVM_GET_STATE(zdev);
		cnt++;
	} while (state == ZULUVM_STATE_TLB_PENDING);
	TNF_PROBE_1(zuluvm_fast_tlb_wait, "zuluvm", /* */,
	    tnf_int, loop_cnt, cnt);
}

/*
 * setup DMA handling for this handle
 */
int
zuluvm_dma_alloc_ctx(zuluvm_info_t devp, int dma, short *mmuctx,
		uint64_t *tsbreg)
{
	struct as *asp = ZULUVM_GET_AS;
	int error = ZULUVM_NO_DEV;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	int state, newstate;

	if (asp == NULL) {
		TNF_PROBE_1(zuluvm_dma_alloc_ctx_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_HAT);
		return (ZULUVM_NO_HAT);
	}

	*tsbreg = 0;
	state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
	    ZULUVM_STATE_STOPPED);
	newstate = ZULUVM_GET_STATE(zdev);
	TNF_PROBE_4(zuluvm_dma_alloc_ctx, "zuluvm", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, dma, dma,
	    tnf_int, oldstate, state,
	    tnf_int, newstate, newstate);
#ifdef DEBUG
	if (zuluvm_debug_state)
		cmn_err(CE_NOTE, "zuluvm_dma_alloc_ctx: state %d\n", state);
#endif
	if (state != ZULUVM_STATE_STOPPED && state != ZULUVM_STATE_IDLE) {
		while (state != ZULUVM_STATE_IDLE) {
			state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
			    ZULUVM_STATE_STOPPED);
#ifdef DEBUG
			if (zuluvm_debug_state)
				cmn_err(CE_NOTE, "zuluvm_dma_alloc_ctx: (loop)"
				    " state %d\n", state);
#endif
			if (state != ZULUVM_STATE_IDLE)
				delay(1);
		}
	}

	if (zdev->zvm.arg != NULL) {
		struct zulu_hat *zhat = NULL;
		zuluvm_proc_t *proc;

		mutex_enter(&zdev->proc_lck);
		proc = zuluvm_find_proc(zdev, asp);
		if (proc != NULL) {
			zhat = proc->zhat;
			proc->refcnt++;
		}
		mutex_exit(&zdev->proc_lck);

		switch (dma) {
		case ZULUVM_DMA1:
			ZULUVM_LOCK;
			zdev->zvm.proc1 = proc;
			ZULUVM_UNLOCK;
			error = ZULUVM_SUCCESS;
			break;
		case ZULUVM_DMA2:
			ZULUVM_LOCK;
			zdev->zvm.proc2 = proc;
			ZULUVM_UNLOCK;
			error = ZULUVM_SUCCESS;
			break;
		default:
			mutex_enter(&zdev->proc_lck);
			(void) zuluvm_proc_release(zdev, proc);
			mutex_exit(&zdev->proc_lck);
		}

		if (error == ZULUVM_SUCCESS && zhat != NULL) {
			zulu_hat_validate_ctx(zhat);
			if (zhat->zulu_ctx >= 0) {
				*mmuctx = zhat->zulu_ctx;
			} else {
				printf("invalid context value: %d\n",
				    zhat->zulu_ctx);

				mutex_enter(&zdev->proc_lck);
				(void) zuluvm_proc_release(zdev, proc);
				mutex_exit(&zdev->proc_lck);

				error = ZULUVM_ERROR;
			}
		} else {
			error = ZULUVM_ERROR;
		}
	}
	TNF_PROBE_1(zuluvm_dma_alloc_ctx_done, "zuluvm", /* */,
	    tnf_int, error, error);
	return (error);
}

/*
 * preload TLB
 * this will try to pre-set the zulu tlb, mainly used for dma engine 2,
 * video read-back.
 */
int
zuluvm_dma_preload(zuluvm_info_t devp, int dma,
		int num, zulud_preload_t *list)
{
	int i;
	int error = ZULUVM_SUCCESS;
	struct zulu_hat *zhat;
	zuluvm_proc_t *proc = NULL;

	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_4(zuluvm_dma_preload, "zuluvm", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, dma, dma,
	    tnf_int, num, num,
	    tnf_opaque, list, list);
	ZULUVM_LOCK;
	switch (dma) {
	case ZULUVM_DMA1:
		proc = zdev->zvm.proc1;
		break;
	case ZULUVM_DMA2:
		proc = zdev->zvm.proc2;
		break;
	}

	mutex_enter(&zdev->proc_lck);
	if (proc == NULL || proc->valid == 0 || proc->zhat == NULL) {
		mutex_exit(&zdev->proc_lck);
		ZULUVM_UNLOCK;
		return (ZULUVM_NO_HAT);
	}
	mutex_exit(&zdev->proc_lck);

	zhat = proc->zhat;
	/*
	 * need to release this to avoid a recursive enter in zuluvm_load_tte,
	 * which gets called from zulu_hat_memload()
	 */
	ZULUVM_UNLOCK;

	mutex_enter(&zdev->load_lck);
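	/*
	 * Walk each preload entry page by page.  The entry's start address
	 * may not be page aligned, so the first iteration only consumes the
	 * remainder of that page (ZULU_HAT_PGDIFF); later iterations consume
	 * a full page of the size returned by zulu_hat_load().
	 */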
	for (i = 0; i < num; i++) {
		int pg_size;
		int res;
		int first = 1;
		caddr_t addr = ZULUVM_GET_PAGE(list[i].addr);
		int64_t size = (int64_t)list[i].len;
		while (size > 0) {
			if (list[i].tlbtype & ~ZULUVM_DMA_MASK) {
				error = ZULUVM_INVALID_MISS;
				break;
			}
			res = zulu_hat_load(zhat, addr,
			    (list[i].tlbtype == ZULUVM_DMA2) ?
			    S_WRITE : S_READ,
			    &pg_size);
			if ((res != 0) || (pg_size < 0)) {
				error = ZULUVM_NO_MAP;
				break;
			}
			ZULUVM_STATS_PRELOAD(zdev);
			TNF_PROBE_2(zuluvm_dma_preload_addr, "zuluvm", /* */,
			    tnf_opaque, addr, addr,
			    tnf_opaque, size, size);
			if (first) {
				first = 0;
				size -= ZULU_HAT_PGDIFF(list[i].addr,
				    pg_size);
			} else {
				size -= ZULU_HAT_PGSZ(pg_size);
			}
			addr += ZULU_HAT_PGSZ(pg_size);
		}
	}
	mutex_exit(&zdev->load_lck);
	TNF_PROBE_1(zuluvm_dma_preload_done, "zuluvm", /* */,
	    tnf_int, error, error);
	return (ZULUVM_SUCCESS);
}

/*
 * destroy DMA handling for this handle
 */
int
zuluvm_dma_free_ctx(zuluvm_info_t devp, int dma)
{
	int error = ZULUVM_NO_DEV;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	int state, newstate;

	state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_STOPPED,
	    ZULUVM_STATE_IDLE);
	newstate = ZULUVM_GET_STATE(zdev);
	TNF_PROBE_4(zuluvm_dma_free_ctx, "zuluvm", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, dma, dma,
	    tnf_int, oldstate, state,
	    tnf_int, newstate, newstate);
#ifdef DEBUG
	if (zuluvm_debug_state)
		cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: state %d\n", state);
#endif
	if (state != ZULUVM_STATE_IDLE && state != ZULUVM_STATE_STOPPED) {
		int doit = 1;
		while (doit) {
			switch (state) {
			case ZULUVM_STATE_CANCELED:
			case ZULUVM_STATE_STOPPED:
				doit = 0;
				break;
			case ZULUVM_STATE_IDLE:
				state = ZULUVM_SET_STATE(zdev,
				    ZULUVM_STATE_STOPPED,
				    ZULUVM_STATE_IDLE);
				break;
			default:
				state = ZULUVM_SET_STATE(zdev,
				    ZULUVM_STATE_CANCELED, state);
			}
			TNF_PROBE_1(zuluvm_dma_free_ctx, "zuluvm", /* */,
			    tnf_int, state, state);
#ifdef DEBUG
			if (zuluvm_debug_state)
				cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: (loop1)"
				    " state %d\n", state);
#endif
		}
	}
	TNF_PROBE_1(zuluvm_dma_free_ctx, "zuluvm", /* */,
	    tnf_int, state, state);

	error = ZULUVM_SUCCESS;
	while (state != ZULUVM_STATE_STOPPED) {
		state = ZULUVM_GET_STATE(zdev);
#ifdef DEBUG
		if (zuluvm_debug_state)
			cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: (loop2)"
			    " state %d\n", state);
#endif
		if (state != ZULUVM_STATE_STOPPED)
			delay(1);
	}
	ZULUVM_LOCK;
	if (zdev->zvm.arg != NULL) {
		zuluvm_proc_t *proc = NULL;
		switch (dma) {
		case ZULUVM_DMA1:
			proc = zdev->zvm.proc1;
			zdev->zvm.proc1 = NULL;
			break;
		case ZULUVM_DMA2:
			proc = zdev->zvm.proc2;
			zdev->zvm.proc2 = NULL;
			break;
		default:
			error = ZULUVM_NO_DEV;
		}
		ZULUVM_UNLOCK;
		if (proc) {
			mutex_enter(&zdev->proc_lck);
			(void) zuluvm_proc_release(zdev, proc);
			mutex_exit(&zdev->proc_lck);
		}
	} else {
		ZULUVM_UNLOCK;
		error = ZULUVM_NO_DEV;
	}
	TNF_PROBE_1(zuluvm_dma_free_ctx_done, "zuluvm", /* */,
	    tnf_int, error, error);
	return (error);
}

static void
zuluvm_do_retarget(zuluvm_state_t *zdev)
{
	int i, idx;
	uint_t cpu;
	for (i = 0; i < ZULUVM_MAX_INTR; i++) {
		if (zdev->interrupts[i].ino != -1) {
			cpu = intr_dist_cpuid();
			idx = zdev->interrupts[i].offset;
			if (zdev->imr[idx] & ZULUVM_IMR_V_MASK)
				zdev->imr[idx] = ZULUVM_IMR_V_MASK |
				    (cpu<<ZULUVM_IMR_TARGET_SHIFT);
			else
				zdev->imr[idx] =
				    cpu<<ZULUVM_IMR_TARGET_SHIFT;
		}
	}
}

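/*
 * zuluvm_retarget_intr() is the intr_dist callback.  zuluvm_do_retarget()
 * above rewrites the target cpu field (ZULUVM_IMR_TARGET_SHIFT) of every
 * mapping register listed in zdev->interrupts[], preserving the valid bit
 * (ZULUVM_IMR_V_MASK) if it is set.
 */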
static void
zuluvm_retarget_intr(void *arg)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)arg;
	ZULUVM_LOCK;
	zuluvm_do_retarget(zdev);
	ZULUVM_UNLOCK;
}

int
zuluvm_add_intr(zuluvm_info_t devp, int ino,
		uint_t (*handler)(caddr_t), caddr_t arg)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_add_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	if (ddi_add_intr(zdev->dip, ino, NULL, NULL, handler, arg)
	    != DDI_SUCCESS) {
		TNF_PROBE_1(zuluvm_add_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_ERROR);
		return (ZULUVM_ERROR);
	}
	return (ZULUVM_SUCCESS);
}

int
zuluvm_rem_intr(zuluvm_info_t devp, int ino)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_rem_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	/* remove from distribution list */
	ZULUVM_LOCK;
	zdev->imr[zdev->interrupts[ino].offset] &= ~ZULUVM_IMR_V_MASK;
	ZULUVM_UNLOCK;
	ddi_remove_intr(zdev->dip, ino, NULL);
	return (ZULUVM_SUCCESS);
}

int
zuluvm_enable_intr(zuluvm_info_t devp, int num)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_2(zuluvm_enable_intr, "zuluvm_intr", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, num, num);
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	if (num < 0 || num > ZULUVM_IMR_MAX) {
		TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_BAD_IDX);
		return (ZULUVM_BAD_IDX);
	}
	ZULUVM_LOCK;
	zdev->imr[num] |= ZULUVM_IMR_V_MASK;
	ZULUVM_UNLOCK;
	TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm_intr", /* */,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

int
zuluvm_disable_intr(zuluvm_info_t devp, int num)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_2(zuluvm_disable_intr, "zuluvm_intr", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, num, num);
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	if (num < 0 || num > ZULUVM_IMR_MAX) {
		TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_BAD_IDX);
		return (ZULUVM_BAD_IDX);
	}
	ZULUVM_LOCK;
	zdev->imr[num] &= ~ZULUVM_IMR_V_MASK;
	ZULUVM_UNLOCK;
	TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm_intr", /* */,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

static int
zuluvm_get_intr_props(zuluvm_state_t *zdev,
		dev_info_t *devi)
{
	int *intr;
	int i;
	uint_t nintr;

	zdev->agentid = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "portid", -1);
	if (zdev->agentid == -1) {
		cmn_err(CE_WARN, "%s%d: no portid property",
		    ddi_get_name(devi),
		    ddi_get_instance(devi));
		return (ZULUVM_ERROR);
	}

	for (i = 0; i < ZULUVM_MAX_INTR; i++) {
		zdev->interrupts[i].offset = 0;
		zdev->interrupts[i].ino = -1;
	}

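	/*
	 * The "interrupts" property supplies one mapping register offset
	 * per ino; record them so zuluvm_do_retarget() and zuluvm_rem_intr()
	 * can find the right register for a given interrupt.
	 */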
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "interrupts", &intr, &nintr) == DDI_PROP_SUCCESS) {

		if (nintr == 0) {
			cmn_err(CE_WARN, "%s%d: no interrupts in property",
			    ddi_get_name(devi),
			    ddi_get_instance(devi));
			ddi_prop_free(intr);
			return (ZULUVM_ERROR);
		}
		if (nintr >= ZULUVM_MAX_INTR) {
			cmn_err(CE_WARN, "%s%d: too many interrupts (%d)",
			    ddi_get_name(devi),
			    ddi_get_instance(devi), nintr);
			ddi_prop_free(intr);
			return (ZULUVM_ERROR);
		}
		for (i = 0; i < nintr; i++) {
			zdev->interrupts[i].offset = intr[i];
			zdev->interrupts[i].ino = i;
		}
		ddi_prop_free(intr);
	} else {
		cmn_err(CE_WARN, "%s%d: no interrupts property",
		    ddi_get_name(devi),
		    ddi_get_instance(devi));
	}
	return (ZULUVM_SUCCESS);
}

/* *** end of zulu *** */