/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * zuluvm module
 *
 * Provides services required by the XVR-4000 graphics accelerator (zulu)
 * that are not provided by the ddi. See PSARC 2002/231.
 *
 * Zulu has 2 dma engines with built-in MMUs. zuluvm provides TLB miss
 * interrupt support, obtaining virtual to physical address translations
 * using the XHAT interface PSARC/2003/517.
 *
 * The module has 3 components. This file, sun4u/vm/zulu_hat.c, and the
 * assembly language routines in sun4u/ml/zulu_asm.s and
 * sun4u/ml/zulu_hat_asm.s.
 *
 * The interrupt handler is a data bearing mondo interrupt handled at TL=1.
 * If no translation is found in the zulu hat's tsb, or if the tsb is locked by
 * C code, the handler posts a soft interrupt which wakes up a parked
 * thread belonging to zuludaemon(1M).
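 * The parked daemon thread re-enters this module through zuluvm_park(),
 * which runs zuluvm_tlb_handler() to resolve the miss via zulu_hat_load().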
 */

#include <sys/conf.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/intr.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/machsystm.h>
#include <sys/ivintr.h>
#include <sys/tnf_probe.h>
#include <sys/intreg.h>
#include <sys/atomic.h>
#include <vm/as.h>
#include <vm/seg_enum.h>
#include <vm/faultcode.h>
#include <sys/dmv.h>
#include <sys/zulumod.h>
#include <sys/zulu_hat.h>

#define	ZULUVM_GET_PAGE(val) \
	(caddr_t)((uintptr_t)(val) & PAGEMASK)
#define	ZULUVM_GET_AS	curthread->t_procp->p_as

#define	ZULUVM_LOCK	mutex_enter(&(zdev->dev_lck))
#define	ZULUVM_UNLOCK	mutex_exit(&(zdev->dev_lck))

#define	ZULUVM_SET_STATE(_z, b, c) \
	cas32((uint32_t *)&((_z)->zvm.state), c, b)
#define	ZULUVM_GET_STATE(_z) \
	(_z)->zvm.state
#define	ZULUVM_SET_IDLE(_z) \
	(_z)->zvm.state = ZULUVM_STATE_IDLE;

#define	ZULUVM_INO_MASK ((1<<INO_SIZE)-1)
#define	ZULUVM_IGN_MASK ((1<<IGN_SIZE)-1)
#define	ZULUVM_MONDO(_zdev, _n) \
	((ZULUVM_IGN_MASK & _zdev->agentid) << INO_SIZE) | \
	(ZULUVM_INO_MASK & (_n))

static void zuluvm_stop(zuluvm_state_t *, int, char *);
static zuluvm_proc_t *zuluvm_find_proc(zuluvm_state_t *, struct as *);
static int zuluvm_proc_release(zuluvm_state_t *zdev, zuluvm_proc_t *proc);
static int zuluvm_get_intr_props(zuluvm_state_t *zdev, dev_info_t *devi);
static int zuluvm_driver_attach(zuluvm_state_t *);
static int zuluvm_driver_detach(zuluvm_state_t *);
static void zuluvm_retarget_intr(void *arg);
static void zuluvm_do_retarget(zuluvm_state_t *zdev);

extern const unsigned int _mmu_pageshift;

extern int zuluvm_base_pgsize;
static int zuluvm_pagesizes[ZULUM_MAX_PG_SIZES + 1];

int zuluvm_fast_tlb = 1;

zuluvm_state_t *zuluvm_devtab[ZULUVM_MAX_DEV];
kmutex_t zuluvm_lck;

#ifdef DEBUG
int zuluvm_debug_state = 0;
#endif

unsigned long zuluvm_ctx_locked = 0;

/*
 * Module linkage information for the kernel.
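 * zuluvm is installed as a miscellaneous module (modlmisc); the zulu
 * device driver links against it for the services defined below.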
 */
extern struct mod_ops mod_miscops;

static struct modlmisc modlmisc = {
	&mod_miscops,
	"sun4u support " ZULUVM_MOD_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

int
_init(void)
{
	zuluvm_base_pgsize = (_mmu_pageshift - 13) / 3;
	if (zulu_hat_init() != 0) {
		return (ZULUVM_ERROR);
	}
	mutex_init(&zuluvm_lck, NULL, MUTEX_DEFAULT, NULL);
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	mutex_destroy(&zuluvm_lck);
	(void) zulu_hat_destroy();
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * currently the kernel driver makes the following assumptions:
 * - there is only one TLB miss per zulu device handled at
 *   any given time
 *   ==> we only need local data storage per device, not per DMA
 *   ==> a page fault will block the DMA engine until the fault
 *       is resolved
 *   ==> a pagefault will not trigger a zulu DMA context switch
 *
 * If we want to implement asynchronous zulu page faults, then we
 * need to keep track of outstanding faults while zulu DMA runs
 * in a different context.
 */
static int
zuluvm_write_tte(zuluvm_state_t *zdev, void *arg, caddr_t addr,
    int t_pfn, int t_perm, int t_size, uint64_t tag,
    int tlbtype, int *size)
{
	int error;

	(void) addr;

	ZULUVM_STATS_MISS(zdev, t_size);

	if (tag == 0) { /* not coming from preload */
		int state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_WRITE_TTE,
		    ZULUVM_STATE_INTR_PENDING);
		if (state != ZULUVM_STATE_INTR_PENDING) {
			zuluvm_stop(zdev, state, "zuluvm_write_tte");
			return (ZULUVM_MISS_CANCELED);
		}
	}

	if (!(tlbtype & ZULUVM_ITLB_FLAG) &&
	    t_size != zuluvm_base_pgsize &&
	    t_size != ZULU_TTE4M) {
		t_size = zuluvm_base_pgsize;
		TNF_PROBE_2(zuluvm_write_tte_new_pfn, "zuluvm", /* */,
		    tnf_opaque, t_pfn, t_pfn, tnf_int, pagesize, t_size);
	}
	TNF_PROBE_1(zuluvm_write_tte, "zuluvm", /* */,
	    tnf_opaque, t_pfn, t_pfn);
	/*
	 * if the caller is zuluvm_preload, then we need to pass
	 * back the page size so it can add the right offset.
	 */
	if (size)
		*size = t_size;

	error = zulud_write_tte(zdev, arg, t_size, tag, t_pfn,
	    t_perm, tlbtype);

	return (error);
}

static void
zuluvm_stop(zuluvm_state_t *zdev, int state, char *tag)
{
	int ostate = state;
	while (state != ZULUVM_STATE_STOPPED) {
		state = ZULUVM_SET_STATE(zdev,
		    ZULUVM_STATE_STOPPED, state);
#ifdef DEBUG
		if (zuluvm_debug_state)
			cmn_err(CE_NOTE, "zuluvm_stop(%s): (loop) state %d\n",
			    tag, state);
#endif
	}
	TNF_PROBE_2(zuluvm_stop, "zuluvm", /* */,
	    tnf_string, tag, tag,
	    tnf_int, state, ostate);
	ZULUVM_STATS_CANCEL(zdev);
}

/*
 * Executed in the context of the parked zulu daemon thread,
 * uses zulu_hat_load to resolve the miss.
 * The tte is loaded and the miss completed by zuluvm_load_tte,
 * which is called back from zulu_hat.
 *
 * This function is synchronized with zuluvm_as_free.
 * zuluvm_as_free will block until miss servicing is complete.
 *
 * There is a race condition between as_free and the zulu tlb miss
 * soft interrupt:
 *	- queue zulu interrupt
 *	- process dies, as_free runs
 *	- interrupt gets scheduled and runs as_fault on the
 *	  already freed as.
 * This is solved by keeping track of current zulu dma processes
 * and invalidating them in zuluvm_as_free.
 */
uint_t
zuluvm_tlb_handler(caddr_t data)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)data;
	int error;
	int flag = 0;
	int wait = 0;
	zuluvm_proc_t *proc = NULL;
	struct zulu_hat *zhat = NULL;
	caddr_t addr;
	int tlbtype;
	void *arg;
	int state, newstate;

	TNF_PROBE_1(zuluvm_tlb_handler_lwp, "zuluvm", /* */,
	    tnf_opaque, lwp, ttolwp(curthread));

	ZULUVM_LOCK;
	error = ZULUVM_GET_TLB_ERRCODE(zdev);
	addr = (caddr_t)ZULUVM_GET_TLB_ADDR(zdev);
	tlbtype = ZULUVM_GET_TLB_TYPE(zdev);
	arg = zdev->zvm.arg;

	/*
	 * select the correct dma engine and remember the
	 * as_free synchronization flags.
	 */
	switch (tlbtype) {
	case ZULUVM_ITLB1:
	case ZULUVM_DMA1:
		proc = zdev->zvm.proc1;
		flag |= ZULUVM_DO_INTR1;
		wait |= ZULUVM_WAIT_INTR1;
		break;
	case ZULUVM_ITLB2:
	case ZULUVM_DMA2:
		proc = zdev->zvm.proc2;
		flag |= ZULUVM_DO_INTR2;
		wait |= ZULUVM_WAIT_INTR2;
		break;
	}

	state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_INTR_PENDING,
	    ZULUVM_STATE_INTR_QUEUED);
	newstate = ZULUVM_GET_STATE(zdev);

	TNF_PROBE_2(zuluvm_tlb_handler_state, "zuluvm", /* */,
	    tnf_int, oldstate, state,
	    tnf_int, newstate, newstate);
#ifdef DEBUG
	if (zuluvm_debug_state)
		cmn_err(CE_NOTE, "zuluvm_tlb_handler: state %d\n", state);
#endif
	if (state != ZULUVM_STATE_INTR_PENDING &&
	    state != ZULUVM_STATE_INTR_QUEUED) {
		ZULUVM_UNLOCK;

		zuluvm_stop(zdev, state, "softintr1");
		zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_MISS_CANCELED);
		return (1);
	}

	/*
	 * block the as_free callback in case it comes in
	 */
	zdev->intr_flags |= flag;
	ZULUVM_UNLOCK;

	mutex_enter(&zdev->proc_lck);
	/*
	 * check if this as is still valid
	 */
	if (proc == NULL || proc->valid == 0 || proc->zhat == NULL) {
		mutex_exit(&zdev->proc_lck);
		/*
		 * we are on our way out, wake up the as_free
		 * callback if it is waiting for us
		 */
		ZULUVM_LOCK;
		zdev->intr_flags &= ~flag;
		if (zdev->intr_flags | wait)
			cv_broadcast(&zdev->intr_wait);
		ZULUVM_UNLOCK;
		state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
		    ZULUVM_STATE_INTR_PENDING);
		if (state != ZULUVM_STATE_INTR_PENDING) {
			zuluvm_stop(zdev, state, "softintr3");
		}
		zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_NO_HAT);
		return (1);
	}
	zhat = proc->zhat;
	mutex_exit(&zdev->proc_lck);

	TNF_PROBE_1(zuluvm_tlb_handler, "zuluvm", /* */,
	    tnf_opaque, addr, addr);

	switch (error) {
	case ZULUVM_CTX_LOCKED:
		/*
		 * trap handler found that zulu_hat had the lock bit set;
		 * rather than block in the fast trap handler, it punts
		 * in this rare instance
		 */
		++zuluvm_ctx_locked;
		TNF_PROBE_1(zuluvm_ctx_locked, "zuluvm", /* CSTYLED */,
		    tnf_ulong, zuluvm_ctx_locked, zuluvm_ctx_locked);

		/*FALLTHROUGH*/

	case ZULUVM_TTE_DELAY:
		/*
		 * fast tlb handler was skipped, see zuluvm_fast_tlb flag
		 */
		/*FALLTHROUGH*/

	case ZULUVM_NO_TTE:
		/*
		 * no TSB entry and TTE in the hash
		 */
		mutex_enter(&zdev->load_lck);
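		/*
		 * in_intr tells zuluvm_load_tte() that this load is on
		 * behalf of a pending miss, so it writes the TTE and
		 * calls zulud_tlb_done() itself.
		 */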
		zdev->in_intr = 1;
		error = zulu_hat_load(zhat, addr,
		    (tlbtype == ZULUVM_DMA2) ? S_WRITE : S_READ, NULL);
		zdev->in_intr = 0;
		mutex_exit(&zdev->load_lck);
		if (error) {

			error = ZULUVM_NO_MAP;
		} else {
			error = ZULUVM_SUCCESS;
			TNF_PROBE_1(zuluvm_tlb_handler_done, "zuluvm", /* */,
			    tnf_int, error, error);
			return (1);
		}

	default:
		/*
		 * error case, fall through and tell zulu driver to abort DMA
		 */
		break;
	}

	if (error != ZULUVM_MISS_CANCELED) {
		state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
		    ZULUVM_STATE_WRITE_TTE);
		newstate = ZULUVM_GET_STATE(zdev);
		TNF_PROBE_2(zuluvm_tlb_handler_state_done, "zuluvm", /* */,
		    tnf_int, oldstate, state,
		    tnf_int, newstate, newstate);
		if (state != ZULUVM_STATE_WRITE_TTE) {
			zuluvm_stop(zdev, state, "softintr4");
		}
	}
	/*
	 * synchronize with as_free callback
	 * It will set the wait flag, in that case we send
	 * a wake up.
	 */
	ZULUVM_LOCK;
	zdev->intr_flags &= ~flag;
	if (zdev->intr_flags | wait)
		cv_broadcast(&zdev->intr_wait);
	ZULUVM_UNLOCK;

	TNF_PROBE_1(zuluvm_tlb_handler_done, "zuluvm", /* */,
	    tnf_int, error, error);

	zulud_tlb_done(zdev, arg, tlbtype, error);

	return (1);
}


void
zuluvm_load_tte(struct zulu_hat *zhat, caddr_t addr, uint64_t pfn,
    int perm, int size)
{
	zuluvm_state_t *zdev = zhat->zdev;
	int tlbtype = ZULUVM_GET_TLB_TYPE(zdev);

	ASSERT(MUTEX_HELD(&zdev->load_lck));
	ASSERT(pfn != 0);

	if (zdev->in_intr) {
		int error;
		int flag = 0;
		int wait = 0;

		error = zuluvm_write_tte(zdev, zdev->zvm.arg, addr, pfn,
		    perm, size, 0, tlbtype, NULL);

		if (error != ZULUVM_MISS_CANCELED) {
			int state, newstate;

			state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
			    ZULUVM_STATE_WRITE_TTE);
			newstate = ZULUVM_GET_STATE(zdev);
			TNF_PROBE_2(zuluvm_tlb_handler_state_done, "zuluvm",
			    /* */, tnf_int, oldstate, state,
			    tnf_int, newstate, newstate);
			if (state != ZULUVM_STATE_WRITE_TTE) {
				zuluvm_stop(zdev, state, "softintr4");
			}
		}
		/*
		 * synchronize with as_free callback
		 * It will set the wait flag, in that case we send
		 * a wake up.
		 */
		switch (tlbtype) {
		case ZULUVM_ITLB1:
		case ZULUVM_DMA1:
			flag = ZULUVM_DO_INTR1;
			wait = ZULUVM_WAIT_INTR1;
			break;
		case ZULUVM_ITLB2:
		case ZULUVM_DMA2:
			flag = ZULUVM_DO_INTR2;
			wait = ZULUVM_WAIT_INTR2;
			break;
		}

		ZULUVM_LOCK;
		zdev->intr_flags &= ~flag;
		if (zdev->intr_flags | wait)
			cv_broadcast(&zdev->intr_wait);
		ZULUVM_UNLOCK;

		zulud_tlb_done(zdev, zdev->zvm.arg, tlbtype, error);
	} else {
		(void) zuluvm_write_tte(zdev, zdev->zvm.arg, addr, pfn,
		    perm, size, (uint64_t)addr | zhat->zulu_ctx,
		    tlbtype, NULL);
	}
}


/*
 * This function provides the faulting thread for zulu page faults.
 * It is called from the device driver in response to an ioctl issued
 * by a zuludaemon thread.
 * It sits in cv_wait_sig until it gets woken up by a signal or a
 * zulu tlb miss soft interrupt.
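 * A return of 0 from cv_wait_sig means the wait was interrupted by a signal
 * and the daemon returns; any other wakeup means a miss is pending and
 * zuluvm_tlb_handler() runs in this thread's context.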
 */
int
zuluvm_park(zuluvm_info_t devp)
{
	int rval;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	mutex_enter(&zdev->park_lck);
	zdev->parking = 1;
	for (;;) {
		rval = cv_wait_sig(&zdev->park_cv, &zdev->park_lck);
		if (rval == 0)
			break;
		rval = zuluvm_tlb_handler(devp);
	}
	zdev->parking = 0;
	mutex_exit(&zdev->park_lck);
	return (rval);
}

/*
 * zulu soft interrupt handler, just triggers the parked zulu fault
 * thread
 */
/*ARGSUSED*/
uint_t
zuluvm_softintr(caddr_t devp, caddr_t arg2)
{
	int tlbtype;
	void *arg;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	mutex_enter(&zdev->park_lck);
	if (zdev->parking) {
		cv_signal(&zdev->park_cv);
		mutex_exit(&zdev->park_lck);
		TNF_PROBE_1(zuluvm_fast_intr, "zuluvm", /* */,
		    tnf_opaque, devp, devp);
	} else {
		mutex_exit(&zdev->park_lck);
		cmn_err(CE_NOTE, "zuluvm: no page fault thread\n");
		ZULUVM_LOCK;
		tlbtype = ZULUVM_GET_TLB_TYPE(zdev);
		arg = zdev->zvm.arg;
		ZULUVM_UNLOCK;
		TNF_PROBE_0(zuluvm_fast_intr, "zuluvm", /* */);
		zuluvm_stop(zdev, ZULUVM_STATE_INTR_QUEUED, "fast_intr");
		zulud_tlb_done(zdev, arg, tlbtype, ZULUVM_NO_TTE);
	}
	return (1);
}

/* ***** public interface for process mapping events (hat layer) ***** */

/*
 * If the page size matches the Zulu page sizes then just pass
 * it thru. If not then emulate the page demap with demaps of
 * smaller page size.
 */
/* ARGSUSED */
void
zuluvm_demap_page(void *arg, struct hat *hat_ptr, short ctx,
    caddr_t vaddr, uint_t size)
{
	void *ddarg;
	zuluvm_state_t *zdev = (zuluvm_state_t *)arg;

	if (arg == NULL)
		return;

	ZULUVM_STATS_DEMAP_PAGE(zdev);

	ddarg = zdev->zvm.arg;

	TNF_PROBE_3(zuluvm_demap_page, "zuluvm", /* */,
	    tnf_opaque, addr, vaddr,
	    tnf_int, size, size,
	    tnf_int, ctx, ctx);

	if (ddarg != NULL) {
		if (size != zuluvm_base_pgsize &&
		    size != ZULU_TTE4M) {
			int i;
			int cnt = size - zuluvm_base_pgsize;
			cnt = ZULU_HAT_SZ_SHIFT(cnt);
			for (i = 0; i < cnt; i++) {
				uintptr_t addr = (uintptr_t)vaddr |
				    i << ZULU_HAT_BP_SHIFT;
				zulud_demap_page(zdev, ddarg,
				    (caddr_t)addr, ctx);
			}
		} else {
			zulud_demap_page(zdev, ddarg, vaddr, ctx);
		}
		TNF_PROBE_0(zuluvm_demap_page_done, "zuluvm", /* */);
	} else {
		TNF_PROBE_0(zuluvm_demap_page_null_ddarg, "zuluvm", /* */);
	}
}

/*
 * An entire context has gone away, just pass it thru
 */
void
zuluvm_demap_ctx(void *arg, short ctx)
{
	void *ddarg;
	zuluvm_state_t *zdev = (zuluvm_state_t *)arg;

	if (arg == NULL)
		return;

	ZULUVM_STATS_DEMAP_CTX(zdev);

	TNF_PROBE_1(zuluvm_demap_ctx, "zuluvm", /* */,
	    tnf_int, ctx, ctx);
	ddarg = zdev->zvm.arg;

	if (ddarg != NULL)
		zulud_demap_ctx(zdev, ddarg, ctx);
}

static int
zuluvm_driver_attach(zuluvm_state_t *zdev)
{
	int i;
	mutex_enter(&zuluvm_lck);
	for (i = 0; i < ZULUVM_MAX_DEV; i++) {
		if (zuluvm_devtab[i] == NULL) {
			zuluvm_devtab[i] = zdev;
			ZULUVM_SET_IDLE(zdev);
			break;
		}
	}
	mutex_exit(&zuluvm_lck);
	if (i >= ZULUVM_MAX_DEV)
		return (ZULUVM_ERROR);

	if (zulu_hat_attach((void *)zdev) != 0) {
		return (ZULUVM_ERROR);
	}

	mutex_init(&zdev->dev_lck, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zdev->load_lck, NULL, MUTEX_DEFAULT, NULL);
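	/*
	 * proc_lck protects the per-device process list and proc reference
	 * counts; park_lck guards the parked fault thread state.
	 */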
	mutex_init(&zdev->proc_lck, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zdev->park_lck, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zdev->park_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&zdev->intr_wait, NULL, CV_DEFAULT, NULL);
	zdev->parking = 0;

#ifdef ZULUVM_STATS
	zdev->zvm.cancel = 0;
	zdev->zvm.pagefault = 0;
	zdev->zvm.no_mapping = 0;
	zdev->zvm.preload = 0;
	zdev->zvm.migrate = 0;
	zdev->zvm.pagesize = 0;
	zdev->zvm.tlb_miss[0] = 0;
	zdev->zvm.tlb_miss[1] = 0;
	zdev->zvm.tlb_miss[2] = 0;
	zdev->zvm.tlb_miss[3] = 0;
	zdev->zvm.itlb1miss = 0;
	zdev->zvm.dtlb1miss = 0;
	zdev->zvm.itlb2miss = 0;
	zdev->zvm.dtlb2miss = 0;
#endif
	zdev->zvm.pfncnt = 0;
	for (i = 0; i < 50; i++)
		zdev->zvm.pfnbuf[i] = 0;

	zdev->zvm.mmu_pa = NULL;
	zdev->zvm.proc1 = NULL;
	zdev->zvm.proc2 = NULL;
	zdev->procs = NULL;
	return (ZULUVM_SUCCESS);
}

static int
zuluvm_driver_detach(zuluvm_state_t *zdev)
{
	int i;
	cv_destroy(&zdev->intr_wait);
	cv_destroy(&zdev->park_cv);
	mutex_destroy(&zdev->park_lck);
	mutex_destroy(&zdev->proc_lck);
	mutex_destroy(&zdev->dev_lck);
	mutex_destroy(&zdev->load_lck);
	zdev->dops = NULL;

	mutex_enter(&zuluvm_lck);
	for (i = 0; i < ZULUVM_MAX_DEV; i++) {
		if (zuluvm_devtab[i] == zdev) {
			zuluvm_devtab[i] = NULL;
			break;
		}
	}
	mutex_exit(&zuluvm_lck);

	if (zulu_hat_detach((void *)zdev) == 0) {
		return (ZULUVM_SUCCESS);
	} else {
		return (ZULUVM_ERROR);
	}
}

zulud_ops_t *zuluvm_dops = NULL;

/*
 * init the zulu kernel driver (variables, locks, etc)
 */
int
zuluvm_init(zulud_ops_t *ops, int **pagesizes)
{
	int error = ZULUVM_SUCCESS;
	int i;
	int size = zuluvm_base_pgsize; /* MMU_PAGESIZE; */

	if (ops->version != ZULUVM_INTERFACE_VERSION)
		return (ZULUVM_VERSION_MISMATCH);

	zuluvm_dops = ops;
	for (i = 0; i < ZULUM_MAX_PG_SIZES && size <= ZULU_TTE4M; i++) {
		zuluvm_pagesizes[i] = size++;
	}
	zuluvm_pagesizes[i] = -1;
	*pagesizes = zuluvm_pagesizes;

	return (error);
}

/*
 * cleanup afterwards
 */
int
zuluvm_fini(void)
{
	zuluvm_dops = NULL;
	return (ZULUVM_SUCCESS);
}

/*
 * allocate a zulu kernel driver instance for this zulu device
 */
int
zuluvm_alloc_device(dev_info_t *devi, void *arg, zuluvm_info_t *devp,
    caddr_t mmu, caddr_t imr)
{
	uint_t intr_num;
	zuluvm_state_t *zdev;
	int error = ZULUVM_SUCCESS;

	TNF_PROBE_3(zuluvm_alloc_device, "zuluvm", /* */,
	    tnf_opaque, arg, arg,
	    tnf_opaque, mmu, mmu,
	    tnf_opaque, imr, imr);

	zdev = kmem_zalloc(sizeof (zuluvm_state_t), KM_SLEEP);
	zdev->dip = devi;
	zdev->dops = zuluvm_dops;
	error = zuluvm_driver_attach(zdev);
	if (error != ZULUVM_SUCCESS) {
		kmem_free(zdev, sizeof (zuluvm_state_t));
		return (ZULUVM_NO_DEV);
	}

	ZULUVM_LOCK;
	error = zuluvm_get_intr_props(zdev, devi);
	if (error != ZULUVM_SUCCESS) {
		ZULUVM_UNLOCK;
		error = zuluvm_driver_detach(zdev);
		if (error != ZULUVM_SUCCESS)
			return (error);
		kmem_free(zdev, sizeof (zuluvm_state_t));
		return (ZULUVM_NO_DEV);
	}
	zdev->zvm.arg = arg;
	zdev->zvm.mmu_pa = (uint64_t)va_to_pa((void *)mmu);
	zdev->imr = (uint64_t *)imr;
	zdev->zvm.dmv_intr = dmv_add_softintr(zuluvm_dmv_tlbmiss_tl1,
	    (void *)zdev);
	zulud_set_itlb_pc(zdev, arg, DMV_MAKE_DMV(zdev->zvm.dmv_intr,
	    (void *)zdev));
	zulud_set_dtlb_pc(zdev, arg, DMV_MAKE_DMV(zdev->zvm.dmv_intr,
	    (void *)zdev));
	intr_dist_add(zuluvm_retarget_intr, (void *)zdev);
	zuluvm_do_retarget(zdev);
	intr_num = add_softintr(ZULUVM_PIL, zuluvm_softintr, (caddr_t)zdev);
	zdev->zvm.intr_num = intr_num;
	*devp = (caddr_t)zdev;
	ZULUVM_UNLOCK;
	TNF_PROBE_1(zuluvm_alloc_device_done, "zuluvm", /* */,
	    tnf_opaque, devp, *devp);
	return (ZULUVM_SUCCESS);
}

/*
 * free a zulu kernel driver instance
 */
int
zuluvm_free_device(zuluvm_info_t devp)
{
	int error;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_1(zuluvm_free_device, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev);

	if (zdev == NULL)
		return (ZULUVM_NO_DEV);
	ZULUVM_LOCK;
	if (zdev->zvm.arg == NULL) {
		ZULUVM_UNLOCK;
		TNF_PROBE_1(zuluvm_free_device_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	(void) dmv_rem_intr(zdev->zvm.dmv_intr);
	rem_softintr(zdev->zvm.intr_num);
	intr_dist_rem(zuluvm_retarget_intr, (void *)zdev);
	zdev->zvm.arg = NULL;
	ZULUVM_UNLOCK;
	error = zuluvm_driver_detach(zdev);
	if (error != ZULUVM_SUCCESS)
		return (error);
	zdev->dops = NULL;
	kmem_free(zdev, sizeof (zuluvm_state_t));

	TNF_PROBE_0(zuluvm_free_device_done, "zuluvm", /* */);
	return (ZULUVM_SUCCESS);
}

/*
 * find the as in the list of active zulu processes
 * The caller has to hold zdev->proc_lck
 */
static zuluvm_proc_t *
zuluvm_find_proc(zuluvm_state_t *zdev, struct as *asp)
{
	zuluvm_proc_t *p;
	TNF_PROBE_2(zuluvm_find_proc, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev,
	    tnf_opaque, asp, asp);
	for (p = zdev->procs; p != NULL; p = p->next) {
		if (ZULU_HAT2AS(p->zhat) == asp) {
			TNF_PROBE_1(zuluvm_find_proc_done,
			    "zuluvm", /* */, tnf_opaque, proc, p);
			return (p);
		}
	}
	TNF_PROBE_0(zuluvm_find_proc_fail, "zuluvm", /* */);
	return (NULL);
}

void
zuluvm_as_free(struct as *as, void *arg, uint_t events)
{
	zuluvm_proc_t *proc = (zuluvm_proc_t *)arg;
	zuluvm_state_t *zdev = proc->zdev;
	int wait = 0;
	int flag = 0;
	int valid;

	(void) events;

	TNF_PROBE_1(zuluvm_as_free, "zuluvm", /* */,
	    tnf_opaque, arg, arg);

	(void) as_delete_callback(as, arg);
	/*
	 * if this entry is still valid, then we need to sync
	 * with the zuluvm_tlb_handler routine.
	 */
	mutex_enter(&zdev->proc_lck);
	valid = proc->valid;
	proc->valid = 0;
	mutex_exit(&zdev->proc_lck);

	if (valid) {
		ZULUVM_LOCK;
		if (proc == zdev->zvm.proc1) {
			flag |= ZULUVM_WAIT_INTR1;
			wait |= ZULUVM_DO_INTR1;
		}
		if (proc == zdev->zvm.proc2) {
			flag |= ZULUVM_WAIT_INTR2;
			wait |= ZULUVM_DO_INTR2;
		}
		if (flag) {
			zdev->intr_flags |= flag;
			/*
			 * wait until the tlb miss is resolved
			 */
			while (zdev->intr_flags & wait) {
				cv_wait(&zdev->intr_wait, &zdev->dev_lck);
			}
			zdev->intr_flags &= ~flag;
		}
		ZULUVM_UNLOCK;
	}

	if (proc->zhat != NULL) {
		/*
		 * prevent any further tlb miss processing for this hat
		 */
		zulu_hat_terminate(proc->zhat);
	}

	/*
	 * decrement the ref count and do the appropriate cleanup
	 * if it drops to zero.
	 */
	mutex_enter(&zdev->proc_lck);
	(void) zuluvm_proc_release(zdev, proc);
	mutex_exit(&zdev->proc_lck);
}

/*
 * notify zulu vm driver about a new process going to
 * use zulu DMA. Create a zulu_hat.
 */
int
zuluvm_dma_add_proc(zuluvm_info_t devp, uint64_t *cookie)
{
	zuluvm_proc_t *proc;
	int refcnt;
	struct as *asp = ZULUVM_GET_AS;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_1(zuluvm_dma_add_proc, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev);
	mutex_enter(&zdev->proc_lck);
	proc = zuluvm_find_proc(zdev, asp);
	if (proc == NULL) {
		proc = kmem_zalloc(sizeof (zuluvm_proc_t), KM_SLEEP);
		proc->zhat = zulu_hat_proc_attach(asp, zdev);
		if (proc->zhat == NULL) {
			mutex_exit(&zdev->proc_lck);
			kmem_free(proc, sizeof (zuluvm_proc_t));
			TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
			    tnf_int, valid, 0,
			    tnf_int, error, ZULUVM_ERROR);
			return (ZULUVM_ERROR);
		}
		proc->zdev = zdev;
		proc->valid = 1;
		proc->refcnt = 1;
		proc->next = zdev->procs;
		if (zdev->procs)
			zdev->procs->prev = proc;
		proc->prev = NULL;
		zdev->procs = proc;
		proc->refcnt++;
		(void) as_add_callback(asp, zuluvm_as_free, proc,
		    AS_FREE_EVENT, 0, -1, KM_SLEEP);
	} else {
		if (proc->valid == 0) {
			mutex_exit(&zdev->proc_lck);
			TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
			    tnf_int, valid, 0,
			    tnf_int, error, ZULUVM_ERROR);
			return (ZULUVM_ERROR);
		}
		proc->refcnt++;
	}
	refcnt = proc->refcnt;
	mutex_exit(&zdev->proc_lck);
	*cookie = (uint64_t)proc;
	TNF_PROBE_2(zuluvm_dma_add_proc_done, "zuluvm", /* */,
	    tnf_int, refcnt, refcnt,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

void
zuluvm_proc_hold(zuluvm_state_t *zdev, zuluvm_proc_t *proc)
{
	mutex_enter(&zdev->proc_lck);
	proc->refcnt++;
	mutex_exit(&zdev->proc_lck);
}

/*
 * decrement ref count and free data if it drops to zero
 */
static int
zuluvm_proc_release(zuluvm_state_t *zdev, zuluvm_proc_t *proc)
{
	int refcnt;
	ASSERT(MUTEX_HELD(&zdev->proc_lck));
	refcnt = --proc->refcnt;
	TNF_PROBE_3(zuluvm_proc_release, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev,
	    tnf_opaque, proc, proc,
	    tnf_int, refcnt, refcnt);
	if (refcnt == 0) {
		if (proc->next)
			proc->next->prev = proc->prev;
		if (proc->prev)
			proc->prev->next = proc->next;
		else
			zdev->procs = proc->next;
		kmem_free(proc, sizeof (zuluvm_proc_t));
	}
	return (refcnt);
}

/*
 * this process is no longer using DMA; all entries
 * have been removed from the TLB.
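 * Detach the zulu_hat and drop this caller's reference on the proc entry.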
 */
int
zuluvm_dma_delete_proc(zuluvm_info_t devp, uint64_t cookie)
{
	int refcnt;
	zuluvm_proc_t *proc = (zuluvm_proc_t *)cookie;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_2(zuluvm_dma_delete_proc, "zuluvm", /* */,
	    tnf_opaque, zdev, zdev,
	    tnf_opaque, cookie, cookie);
	mutex_enter(&zdev->proc_lck);
	if (proc != NULL) {
		TNF_PROBE_1(zuluvm_dma_delete_proc, "zuluvm", /* */,
		    tnf_opaque, proc, proc);
		if (proc->zhat != NULL) {
			zulu_hat_proc_detach(proc->zhat);
			proc->zhat = NULL;
		}
		refcnt = zuluvm_proc_release(zdev, proc);
	}
	mutex_exit(&zdev->proc_lck);

	TNF_PROBE_2(zuluvm_dma_delete_proc_done, "zuluvm", /* */,
	    tnf_int, refcnt, refcnt,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

/*
 * barrier sync for device driver
 * blocks until zuluvm_tlbmiss_tl1 function is done
 */
void
zuluvm_fast_tlb_wait(caddr_t devp)
{
	int state;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	int cnt = 0;

	do {
		state = ZULUVM_GET_STATE(zdev);
		cnt++;
	} while (state == ZULUVM_STATE_TLB_PENDING);
	TNF_PROBE_1(zuluvm_fast_tlb_wait, "zuluvm", /* */,
	    tnf_int, loop_cnt, cnt);
}

/*
 * setup DMA handling for this handle
 */
int
zuluvm_dma_alloc_ctx(zuluvm_info_t devp, int dma, short *mmuctx,
    uint64_t *tsbreg)
{
	struct as *asp = ZULUVM_GET_AS;
	int error = ZULUVM_NO_DEV;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	int state, newstate;

	if (asp == NULL) {
		TNF_PROBE_1(zuluvm_dma_alloc_ctx_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_HAT);
		return (ZULUVM_NO_HAT);
	}

	*tsbreg = 0;
	state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
	    ZULUVM_STATE_STOPPED);
	newstate = ZULUVM_GET_STATE(zdev);
	TNF_PROBE_4(zuluvm_dma_alloc_ctx, "zuluvm", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, dma, dma,
	    tnf_int, oldstate, state,
	    tnf_int, newstate, newstate);
#ifdef DEBUG
	if (zuluvm_debug_state)
		cmn_err(CE_NOTE, "zuluvm_dma_alloc_ctx: state %d\n", state);
#endif
	if (state != ZULUVM_STATE_STOPPED && state != ZULUVM_STATE_IDLE) {
		while (state != ZULUVM_STATE_IDLE) {
			state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_IDLE,
			    ZULUVM_STATE_STOPPED);
#ifdef DEBUG
			if (zuluvm_debug_state)
				cmn_err(CE_NOTE, "zuluvm_dma_alloc_ctx: (loop)"
				    " state %d\n", state);
#endif
			if (state != ZULUVM_STATE_IDLE)
				delay(1);
		}
	}

	if (zdev->zvm.arg != NULL) {
		struct zulu_hat *zhat;
		zuluvm_proc_t *proc;

		mutex_enter(&zdev->proc_lck);
		proc = zuluvm_find_proc(zdev, asp);
		if (proc != NULL) {
			zhat = proc->zhat;
			proc->refcnt++;
		}
		mutex_exit(&zdev->proc_lck);

		switch (dma) {
		case ZULUVM_DMA1:
			ZULUVM_LOCK;
			zdev->zvm.proc1 = proc;
			ZULUVM_UNLOCK;
			error = ZULUVM_SUCCESS;
			break;
		case ZULUVM_DMA2:
			ZULUVM_LOCK;
			zdev->zvm.proc2 = proc;
			ZULUVM_UNLOCK;
			error = ZULUVM_SUCCESS;
			break;
		default:
			mutex_enter(&zdev->proc_lck);
			(void) zuluvm_proc_release(zdev, proc);
			mutex_exit(&zdev->proc_lck);
		}

		if (error == ZULUVM_SUCCESS) {
			zulu_hat_validate_ctx(zhat);
			if (zhat->zulu_ctx >= 0) {
				*mmuctx = zhat->zulu_ctx;
			} else {
				printf("invalid context value: %d\n",
				    zhat->zulu_ctx);

				mutex_enter(&zdev->proc_lck);
				(void) zuluvm_proc_release(zdev, proc);
				mutex_exit(&zdev->proc_lck);

				error = ZULUVM_ERROR;
			}
		} else {
			error = ZULUVM_ERROR;
		}
	}
	TNF_PROBE_1(zuluvm_dma_alloc_ctx_done, "zuluvm", /* */,
	    tnf_int, error, error);
	return (error);
}

/*
 * preload TLB
 * this will try to pre-set the zulu tlb, mainly used for dma engine 2,
 * video read-back.
 */
int
zuluvm_dma_preload(zuluvm_info_t devp, int dma,
    int num, zulud_preload_t *list)
{
	int i;
	int error = ZULUVM_SUCCESS;
	struct zulu_hat *zhat;
	zuluvm_proc_t *proc = NULL;

	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_4(zuluvm_dma_preload, "zuluvm", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, dma, dma,
	    tnf_int, num, num,
	    tnf_opaque, list, list);
	ZULUVM_LOCK;
	switch (dma) {
	case ZULUVM_DMA1:
		proc = zdev->zvm.proc1;
		break;
	case ZULUVM_DMA2:
		proc = zdev->zvm.proc2;
		break;
	}

	mutex_enter(&zdev->proc_lck);
	if (proc == NULL || proc->valid == 0 || proc->zhat == NULL) {
		mutex_exit(&zdev->proc_lck);
		ZULUVM_UNLOCK;
		return (ZULUVM_NO_HAT);
	}
	mutex_exit(&zdev->proc_lck);

	zhat = proc->zhat;
	/*
	 * need to release this to avoid recursive enter in zuluvm_load_tte
	 * which gets called from zulu_hat_memload()
	 */
	ZULUVM_UNLOCK;

	mutex_enter(&zdev->load_lck);
	for (i = 0; i < num; i++) {
		int pg_size;
		int res;
		int first = 1;
		caddr_t addr = ZULUVM_GET_PAGE(list[i].addr);
		int64_t size = (int64_t)list[i].len;
		while (size > 0) {
			if (list[i].tlbtype & ~ZULUVM_DMA_MASK) {
				error = ZULUVM_INVALID_MISS;
				break;
			}
			res = zulu_hat_load(zhat, addr,
			    (list[i].tlbtype == ZULUVM_DMA2) ? S_WRITE : S_READ,
			    &pg_size);
			if ((res != 0) || (pg_size < 0)) {
				error = ZULUVM_NO_MAP;
				break;
			}
			ZULUVM_STATS_PRELOAD(zdev);
			TNF_PROBE_2(zuluvm_dma_preload_addr, "zuluvm", /* */,
			    tnf_opaque, addr, addr,
			    tnf_opaque, size, size);
			if (first) {
				first = 0;
				size -= ZULU_HAT_PGDIFF(list[i].addr,
				    pg_size);
			} else {
				size -= ZULU_HAT_PGSZ(pg_size);
			}
			addr += ZULU_HAT_PGSZ(pg_size);
		}
	}
	mutex_exit(&zdev->load_lck);
	TNF_PROBE_1(zuluvm_dma_preload_done, "zuluvm", /* */,
	    tnf_int, error, error);
	return (ZULUVM_SUCCESS);
}

/*
 * destroy DMA handling for this handle
 */
int
zuluvm_dma_free_ctx(zuluvm_info_t devp, int dma)
{
	int error = ZULUVM_NO_DEV;
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	int state, newstate;

	state = ZULUVM_SET_STATE(zdev, ZULUVM_STATE_STOPPED,
	    ZULUVM_STATE_IDLE);
	newstate = ZULUVM_GET_STATE(zdev);
	TNF_PROBE_4(zuluvm_dma_free_ctx, "zuluvm", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, dma, dma,
	    tnf_int, oldstate, state,
	    tnf_int, newstate, newstate);
#ifdef DEBUG
	if (zuluvm_debug_state)
		cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: state %d\n", state);
#endif
	if (state != ZULUVM_STATE_IDLE && state != ZULUVM_STATE_STOPPED) {
		int doit = 1;
		while (doit) {
			switch (state) {
			case ZULUVM_STATE_CANCELED:
			case ZULUVM_STATE_STOPPED:
				doit = 0;
				break;
			case ZULUVM_STATE_IDLE:
				state = ZULUVM_SET_STATE(zdev,
				    ZULUVM_STATE_STOPPED,
				    ZULUVM_STATE_IDLE);
				break;
			default:
				state = ZULUVM_SET_STATE(zdev,
				    ZULUVM_STATE_CANCELED, state);
			}
			TNF_PROBE_1(zuluvm_dma_free_ctx, "zuluvm", /* */,
			    tnf_int, state, state);
#ifdef DEBUG
			if (zuluvm_debug_state)
				cmn_err(CE_NOTE, "zuluvm_dma_free_ctx: (loop1)"
				    " state %d\n", state);
#endif
		}
	}
	TNF_PROBE_1(zuluvm_dma_free_ctx, "zuluvm", /* */,
	    tnf_int, state, state);

	error = ZULUVM_SUCCESS;
	while (state != ZULUVM_STATE_STOPPED) {
		state = ZULUVM_GET_STATE(zdev);
#ifdef DEBUG
		if (zuluvm_debug_state)
			cmn_err(CE_NOTE, "zuluvm_dma_free: (loop2) state %d\n",
			    state);
#endif
		if (state != ZULUVM_STATE_STOPPED)
			delay(1);
	}
	ZULUVM_LOCK;
	if (zdev->zvm.arg != NULL) {
		zuluvm_proc_t *proc = NULL;
		switch (dma) {
		case ZULUVM_DMA1:
			proc = zdev->zvm.proc1;
			zdev->zvm.proc1 = NULL;
			break;
		case ZULUVM_DMA2:
			proc = zdev->zvm.proc2;
			zdev->zvm.proc2 = NULL;
			break;
		default:
			error = ZULUVM_NO_DEV;
		}
		ZULUVM_UNLOCK;
		if (proc) {
			mutex_enter(&zdev->proc_lck);
			(void) zuluvm_proc_release(zdev, proc);
			mutex_exit(&zdev->proc_lck);
		}
	} else {
		ZULUVM_UNLOCK;
		error = ZULUVM_NO_DEV;
	}
	TNF_PROBE_1(zuluvm_dma_free_ctx_done, "zuluvm", /* */,
	    tnf_int, error, error);
	return (error);
}

static void
zuluvm_do_retarget(zuluvm_state_t *zdev)
{
	int i, idx;
	uint_t cpu;
	for (i = 0; i < ZULUVM_MAX_INTR; i++) {
		if (zdev->interrupts[i].ino != -1) {
			cpu = intr_dist_cpuid();
			idx = zdev->interrupts[i].offset;
			if (zdev->imr[idx] & ZULUVM_IMR_V_MASK)
				zdev->imr[idx] = ZULUVM_IMR_V_MASK |
				    (cpu<<ZULUVM_IMR_TARGET_SHIFT);
			else
				zdev->imr[idx] =
				    cpu<<ZULUVM_IMR_TARGET_SHIFT;
		}
	}
}

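/*
 * interrupt redistribution callback; registered with intr_dist_add()
 * in zuluvm_alloc_device()
 */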
static void
zuluvm_retarget_intr(void *arg)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)arg;
	ZULUVM_LOCK;
	zuluvm_do_retarget(zdev);
	ZULUVM_UNLOCK;
}

int
zuluvm_add_intr(zuluvm_info_t devp, int ino,
    uint_t (*handler)(caddr_t), caddr_t arg)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_add_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	if (ddi_add_intr(zdev->dip, ino, NULL, NULL, handler, arg)
	    != DDI_SUCCESS) {
		TNF_PROBE_1(zuluvm_add_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_ERROR);
		return (ZULUVM_ERROR);
	}
	return (ZULUVM_SUCCESS);
}

int
zuluvm_rem_intr(zuluvm_info_t devp, int ino)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_rem_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	/* remove from distribution list */
	ZULUVM_LOCK;
	zdev->imr[zdev->interrupts[ino].offset] &= ~ZULUVM_IMR_V_MASK;
	ZULUVM_UNLOCK;
	ddi_remove_intr(zdev->dip, ino, NULL);
	return (ZULUVM_SUCCESS);
}

int
zuluvm_enable_intr(zuluvm_info_t devp, int num)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_2(zuluvm_enable_intr, "zuluvm_intr", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, num, num);
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	if (num < 0 || num > ZULUVM_IMR_MAX) {
		TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_BAD_IDX);
		return (ZULUVM_BAD_IDX);
	}
	ZULUVM_LOCK;
	zdev->imr[num] |= ZULUVM_IMR_V_MASK;
	ZULUVM_UNLOCK;
	TNF_PROBE_1(zuluvm_enable_intr_done, "zuluvm_intr", /* */,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

int
zuluvm_disable_intr(zuluvm_info_t devp, int num)
{
	zuluvm_state_t *zdev = (zuluvm_state_t *)devp;

	TNF_PROBE_2(zuluvm_disable_intr, "zuluvm_intr", /* */,
	    tnf_opaque, devp, devp,
	    tnf_int, num, num);
	if (devp == NULL) {
		TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_NO_DEV);
		return (ZULUVM_NO_DEV);
	}
	if (num < 0 || num > ZULUVM_IMR_MAX) {
		TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm", /* */,
		    tnf_int, error, ZULUVM_BAD_IDX);
		return (ZULUVM_BAD_IDX);
	}
	ZULUVM_LOCK;
	zdev->imr[num] &= ~ZULUVM_IMR_V_MASK;
	ZULUVM_UNLOCK;
	TNF_PROBE_1(zuluvm_disable_intr_done, "zuluvm_intr", /* */,
	    tnf_int, error, ZULUVM_SUCCESS);
	return (ZULUVM_SUCCESS);
}

static int
zuluvm_get_intr_props(zuluvm_state_t *zdev,
    dev_info_t *devi)
{
	int *intr;
	int i;
	uint_t nintr;

	zdev->agentid = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "portid", -1);
	if (zdev->agentid == -1) {
		cmn_err(CE_WARN, "%s%d: no portid property",
		    ddi_get_name(devi),
		    ddi_get_instance(devi));
		return (ZULUVM_ERROR);
	}

	for (i = 0; i < ZULUVM_MAX_INTR; i++) {
		zdev->interrupts[i].offset = 0;
		zdev->interrupts[i].ino = -1;
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "interrupts", &intr, &nintr) == DDI_PROP_SUCCESS) {

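		/* validate the property before caching the imr offsets */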
		if (nintr == 0) {
			cmn_err(CE_WARN, "%s%d: no interrupts in property",
			    ddi_get_name(devi),
			    ddi_get_instance(devi));
			ddi_prop_free(intr);
			return (ZULUVM_ERROR);
		}
		if (nintr >= ZULUVM_MAX_INTR) {
			cmn_err(CE_WARN, "%s%d: too many interrupts (%d)",
			    ddi_get_name(devi),
			    ddi_get_instance(devi), nintr);
			ddi_prop_free(intr);
			return (ZULUVM_ERROR);
		}
		for (i = 0; i < nintr; i++) {
			zdev->interrupts[i].offset = intr[i];
			zdev->interrupts[i].ino = i;
		}
		ddi_prop_free(intr);
	} else {
		cmn_err(CE_WARN, "%s%d: no interrupts property",
		    ddi_get_name(devi),
		    ddi_get_instance(devi));
	}
	return (ZULUVM_SUCCESS);
}

/* *** end of zulu *** */
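
/*
 * Illustrative call sequence (a sketch only; the exact order, arguments and
 * error handling are defined by the zulu device driver, not by this module):
 *
 *	(void) zuluvm_init(ops, &pagesizes);
 *	(void) zuluvm_alloc_device(dip, arg, &devp, mmu, imr);
 *	(void) zuluvm_dma_add_proc(devp, &cookie);
 *	(void) zuluvm_dma_alloc_ctx(devp, ZULUVM_DMA1, &ctx, &tsbreg);
 *	... DMA runs; misses are serviced via zuluvm_park()/zuluvm_softintr() ...
 *	(void) zuluvm_dma_free_ctx(devp, ZULUVM_DMA1);
 *	(void) zuluvm_dma_delete_proc(devp, cookie);
 *	(void) zuluvm_free_device(devp);
 *	(void) zuluvm_fini();
 */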