/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I% %E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else   /* lint */
#include "assym.h"
#endif  /* lint */

#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/machasi.h>
#include <sys/error.h>
#define INTR_REPORT_SIZE        64

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

void
cpu_mondo(void)
{}

#else   /* lint */


/*
 * (TT 0x7c, TL>0) CPU Mondo Queue Handler
 *      Globals are the Interrupt Globals.
 */
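/*
 * Rough, C-flavored pseudocode sketch of the handler below, added for
 * readability only (it is not part of the build). The helpers get_q_reg(),
 * set_q_reg() and ldphys(), and the names q_base_pa and q_size, are
 * hypothetical stand-ins for the ASI_QUEUE register accesses and the
 * ASI_MEM loads of the per-CPU queue fields; the remaining names appear
 * in the code itself.
 *
 *      head = get_q_reg(CPU_MONDO_Q_HD);
 *      tail = get_q_reg(CPU_MONDO_Q_TL);
 *      if (head == tail)
 *              retry;                          // queue is empty
 *      pc   = ldphys(q_base_pa + head);        // TL>0 fast trap handler
 *      arg1 = ldphys(q_base_pa + head + 8);
 *      arg2 = ldphys(q_base_pa + head + 16);
 *      head = (head + INTR_REPORT_SIZE) & (q_size - 1);
 *      set_q_reg(CPU_MONDO_Q_HD, head);        // mark the entry consumed
 *      if (pc >= KERNELBASE)
 *              jump to pc with arg1/arg2 in %g1/%g2;
 *      else {
 *              cpu_mondo_inval++;              // drop the bogus entry
 *              retry;
 *      }
 */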
        ENTRY_NP(cpu_mondo)
        !
        !       Register Usage:-
        !       %g5     PC for fasttrap TL>0 handler
        !       %g1     arg 1
        !       %g2     arg 2
        !       %g3     queue base PA
        !       %g4     queue size mask
        !       %g6     head ptr
        !       %g7     tail ptr
        mov     CPU_MONDO_Q_HD, %g3
        ldxa    [%g3]ASI_QUEUE, %g6             ! %g6 = head ptr
        mov     CPU_MONDO_Q_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g7             ! %g7 = tail ptr
        cmp     %g6, %g7
        be,pn   %xcc, 0f                        ! head == tail
        nop

        CPU_ADDR(%g1,%g2)
        add     %g1, CPU_MCPU, %g2
        ldx     [%g2 + MCPU_CPU_Q_BASE], %g3    ! %g3 = queue base PA
        ldx     [%g2 + MCPU_CPU_Q_SIZE], %g4    ! queue size
        sub     %g4, 1, %g4                     ! %g4 = queue size mask

        ! Load interrupt receive data registers 1 and 2 to fetch
        ! the arguments for the fast trap handler.
        !
        ! XXX - Since the data words in the interrupt report are not defined yet
        ! we assume that the consecutive words contain valid data and preserve
        ! sun4u's xcall mondo arguments.
        ! Register usage:
        !       %g5     PC for fasttrap TL>0 handler
        !       %g1     arg 1
        !       %g2     arg 2

        ldxa    [%g3 + %g6]ASI_MEM, %g5         ! get PC from q base + head
        add     %g6, 0x8, %g6                   ! inc head
        ldxa    [%g3 + %g6]ASI_MEM, %g1         ! read data word 1
        add     %g6, 0x8, %g6                   ! inc head
        ldxa    [%g3 + %g6]ASI_MEM, %g2         ! read data word 2
        add     %g6, (INTR_REPORT_SIZE - 16), %g6 ! inc head to next record
        and     %g6, %g4, %g6                   ! and size mask for wrap around
        mov     CPU_MONDO_Q_HD, %g3
        stxa    %g6, [%g3]ASI_QUEUE             ! store head pointer
        membar  #Sync

#ifdef TRAPTRACE
        TRACE_PTR(%g4, %g6)
        GET_TRACE_TICK(%g6)
        stxa    %g6, [%g4 + TRAP_ENT_TICK]%asi
        TRACE_SAVE_TL_GL_REGS(%g4, %g6)
        rdpr    %tt, %g6
        stha    %g6, [%g4 + TRAP_ENT_TT]%asi
        rdpr    %tpc, %g6
        stna    %g6, [%g4 + TRAP_ENT_TPC]%asi
        rdpr    %tstate, %g6
        stxa    %g6, [%g4 + TRAP_ENT_TSTATE]%asi
        stna    %sp, [%g4 + TRAP_ENT_SP]%asi
        stna    %g5, [%g4 + TRAP_ENT_TR]%asi    ! pc of the TL>0 handler
        stna    %g1, [%g4 + TRAP_ENT_F1]%asi    ! arg1
        stna    %g2, [%g4 + TRAP_ENT_F3]%asi    ! arg2
        mov     CPU_MONDO_Q_HD, %g6
        ldxa    [%g6]ASI_QUEUE, %g6             ! new head offset
        stna    %g6, [%g4 + TRAP_ENT_F2]%asi
        stna    %g7, [%g4 + TRAP_ENT_F4]%asi    ! tail offset
        TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

        /*
         * For now catch invalid PC being passed via cpu_mondo queue
         */
        set     KERNELBASE, %g4
        cmp     %g5, %g4
        bl,a,pn %xcc, 1f                        ! branch if bad %pc
        nop

        jmp     %g5                             ! jump to traphandler
        nop
1:
        ! invalid trap handler, discard it for now
        set     cpu_mondo_inval, %g4
        ldx     [%g4], %g5
        inc     %g5
        stx     %g5, [%g4]
0:
        retry
        /* Never Reached */
        SET_SIZE(cpu_mondo)

#endif /* lint */

#if defined(lint)

void
dev_mondo(void)
{}

#else   /* lint */


/*
 * (TT 0x7d, TL>0) Dev Mondo Queue Handler
 *      Globals are the Interrupt Globals.
 *      We only process one interrupt at a time, causing us to keep
 *      taking this trap till the queue is empty.
 *      We really should drain the whole queue for better performance,
 *      but this will do for now.
 */
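/*
 * Rough, C-flavored pseudocode sketch of the handler below, for
 * readability only (not part of the build). get_q_reg(), set_q_reg(),
 * ldphys() and q_base_pa/q_size are hypothetical stand-ins for the
 * ASI_QUEUE and ASI_MEM accesses, and iv_payload_buf mirrors the
 * IV_PAYLOAD_BUF offset used below; the other names appear in the code.
 *
 *      head = get_q_reg(DEV_MONDO_Q_HD);
 *      tail = get_q_reg(DEV_MONDO_Q_TL);
 *      if (head == tail)
 *              retry;                          // queue is empty
 *      inum = ldphys(q_base_pa + head);
 *      if (inum < MAXIVNUM && intr_vector[inum].iv_payload_buf != NULL)
 *              copy the 64-byte report at q_base_pa + head
 *                  into iv_payload_buf;
 *      head = (head + INTR_REPORT_SIZE) & (q_size - 1);
 *      set_q_reg(DEV_MONDO_Q_HD, head);        // mark the entry consumed
 *      setsoftint_tl1(inum);                   // posts the softint, then retry
 */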
        ENTRY_NP(dev_mondo)
        !
        !       Register Usage:-
        !       %g5     PC for fasttrap TL>0 handler
        !       %g1     arg 1
        !       %g2     arg 2
        !       %g3     queue base PA
        !       %g4     queue size mask
        !       %g6     head ptr
        !       %g7     tail ptr
        mov     DEV_MONDO_Q_HD, %g3
        ldxa    [%g3]ASI_QUEUE, %g6             ! %g6 = head ptr
        mov     DEV_MONDO_Q_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g7             ! %g7 = tail ptr
        cmp     %g6, %g7
        be,pn   %xcc, 0f                        ! head == tail
        nop

        CPU_ADDR(%g1,%g2)
        add     %g1, CPU_MCPU, %g2
        ldx     [%g2 + MCPU_DEV_Q_BASE], %g3    ! %g3 = queue base PA

        ! Register usage:
        !       %g5 - inum
        !       %g1 - cpu struct pointer used below in TRAPTRACE
        !
        ldxa    [%g3 + %g6]ASI_MEM, %g5         ! get inum from q base + head

        !
        ! We verify that inum is valid (< MAXIVNUM). If it is greater
        ! than or equal to MAXIVNUM, we let setsoftint_tl1 take care of it.
        !
        set     MAXIVNUM, %g4
        cmp     %g5, %g4
        bgeu,a,pn %xcc, 1f
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot

        !
        ! Copy the 64-byte payload to *iv_payload_buf if it is not NULL
        !
        set     intr_vector, %g1
        sll     %g5, INTR_VECTOR_SHIFT, %g7
        add     %g1, %g7, %g1                   ! %g1 = &intr_vector[inum]
        ldx     [%g1 + IV_PAYLOAD_BUF], %g1     ! %g1 = iv_payload_buf
        brz,a,pt %g1, 1f                        ! if it is NULL
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot

        !
        ! Now move 64 byte payload from mondo queue to buf
        !
        mov     %g6, %g7                        ! %g7 = head ptr
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 0]                  ! byte 0 - 7
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 8]                  ! byte 8 - 15
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 16]                 ! byte 16 - 23
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 24]                 ! byte 24 - 31
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 32]                 ! byte 32 - 39
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 40]                 ! byte 40 - 47
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 48]                 ! byte 48 - 55
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 56]                 ! byte 56 - 63
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size

1:      sub     %g4, 1, %g4                     ! %g4 = queue size mask
        add     %g6, INTR_REPORT_SIZE, %g6      ! inc head to next record
        and     %g6, %g4, %g6                   ! and mask for wrap around
        mov     DEV_MONDO_Q_HD, %g3
        stxa    %g6, [%g3]ASI_QUEUE             ! increment head offset
        membar  #Sync

#ifdef TRAPTRACE
        TRACE_PTR(%g4, %g6)
        GET_TRACE_TICK(%g6)
        stxa    %g6, [%g4 + TRAP_ENT_TICK]%asi
        TRACE_SAVE_TL_GL_REGS(%g4, %g6)
        rdpr    %tt, %g6
        stha    %g6, [%g4 + TRAP_ENT_TT]%asi
        rdpr    %tpc, %g6
        stna    %g6, [%g4 + TRAP_ENT_TPC]%asi
        rdpr    %tstate, %g6
        stxa    %g6, [%g4 + TRAP_ENT_TSTATE]%asi
        ! move head to sp
        ldx     [%g2 + MCPU_DEV_Q_BASE], %g6
        stna    %g6, [%g4 + TRAP_ENT_SP]%asi    ! Device Queue Base PA
        stna    %g5, [%g4 + TRAP_ENT_TR]%asi    ! Inum
        mov     DEV_MONDO_Q_HD, %g6
        ldxa    [%g6]ASI_QUEUE, %g6             ! New head offset
        stna    %g6, [%g4 + TRAP_ENT_F1]%asi
#ifdef __sparcv9
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g6
        stna    %g6, [%g4 + TRAP_ENT_F2]%asi    ! Q Size
        stna    %g7, [%g4 + TRAP_ENT_F3]%asi    ! tail offset
        stna    %g0, [%g4 + TRAP_ENT_F4]%asi
#endif
        TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

        !
        ! setsoftint_tl1 will do all the work, and finish with a retry
        !
        ba,pt   %xcc, setsoftint_tl1
        mov     %g5, %g1                        ! setsoftint_tl1 expects inum in %g1

0:      retry

        /* Never Reached */
        SET_SIZE(dev_mondo)
#endif /* lint */

#if defined(lint)
uint64_t cpu_mondo_inval;
#else /* lint */
        .seg    ".data"
        .global cpu_mondo_inval
        .align  8
cpu_mondo_inval:
        .skip   8

        .seg    ".text"
#endif  /* lint */


#if defined(lint)

void
resumable_error(void)
{}

#else   /* lint */

/*
 * (TT 0x7e, TL>0) Resumable Error Queue Handler
 *      We keep a shadow copy of the queue in kernel buf.
 *      Read the resumable queue head and tail offset.
 *      If there are entries on the queue, move them to
 *      the kernel buf, which is next to the resumable
 *      queue in memory. Call C routine to process.
 */
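/*
 * Rough, C-flavored pseudocode sketch of the drain loop below, for
 * readability only (not part of the build). get_q_reg(), set_q_reg(),
 * ldphys() and rq_base_pa are hypothetical stand-ins for the ASI_QUEUE
 * and ASI_MEM accesses. The non-resumable handler further down follows
 * the same pattern with the CPU_NRQ_* offsets.
 *
 *      head = get_q_reg(CPU_RQ_HD);
 *      tail = get_q_reg(CPU_RQ_TL);
 *      if (head == tail)
 *              retry;                          // queue is empty
 *      do {
 *              src = rq_base_pa + head;        // error report in the queue
 *              dst = src + CPU_RQ_SIZE;        // matching slot in kernel buf
 *              if (ldphys(dst) != 0)           // C code hasn't drained it yet
 *                      goto overflow;
 *              copy the 64-byte report from src to dst;
 *              head = (head + Q_ENTRY_SIZE) & (CPU_RQ_SIZE - 1);
 *      } while (head != tail);
 *      set_q_reg(CPU_RQ_HD, head);
 *      sys_trap(process_resumable_error, ...); // at PIL 14, or 15 if already there
 *
 * overflow:
 *      set_q_reg(CPU_RQ_HD, tail);             // drop what we cannot shadow
 *      sys_trap(rq_overflow, ...);
 */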
        ENTRY_NP(resumable_error)
        mov     CPU_RQ_HD, %g4
        ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset
        mov     CPU_RQ_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
        mov     %g2, %g6                        ! %g6 = working copy of head; head also saved in %g2

        cmp     %g6, %g3
        be,pn   %xcc, 0f                        ! head == tail
        nop

        CPU_ADDR(%g1, %g4)                      ! %g1 = cpu struct addr

2:      set     CPU_RQ_BASE_OFF, %g4
        ldx     [%g1 + %g4], %g4                ! %g4 = queue base PA
        add     %g6, %g4, %g4                   ! %g4 = PA of ER in Q
        set     CPU_RQ_SIZE, %g7
        add     %g4, %g7, %g7                   ! %g7 = PA of ER in kernel buf

        ldxa    [%g7]ASI_MEM, %g5               ! %g5 = first 8 bytes of ER buf
        cmp     0, %g5
        bne,pn  %xcc, 1f                        ! first 8 bytes are not 0
        nop

        /* Now we can move 64 bytes from queue to buf */
        set     0, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 0 - 7
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 8 - 15
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 16 - 23
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 24 - 31
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 32 - 39
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 40 - 47
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 48 - 55
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 56 - 63

        set     CPU_RQ_SIZE, %g5                ! %g5 = queue size
        sub     %g5, 1, %g5                     ! %g5 = queue size mask

        add     %g6, Q_ENTRY_SIZE, %g6          ! increment q head to next
        and     %g6, %g5, %g6                   ! size mask for wrap around
        cmp     %g6, %g3                        ! head == tail ??

        bne,pn  %xcc, 2b                        ! still have more to process
        nop

        /*
         * head equals tail now, we can update the queue head
         * and call sys_trap
         */
        mov     CPU_RQ_HD, %g4
        stxa    %g6, [%g4]ASI_QUEUE             ! update head offset

        /*
         * Call sys_trap at PIL 14 unless we're already at PIL 15. %g2 is
         * the head offset (arg2) and %g3 is the tail offset (arg3).
         */
        set     process_resumable_error, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
        movl    %icc, PIL_14, %g4

        /*
         * We are here because the C routine is not able to process
         * errors in time. So the first 8 bytes of ER in buf have not
         * been cleared. We update head to tail and call sys_trap to
         * print out an error message.
         */

1:      mov     CPU_RQ_HD, %g4
        stxa    %g3, [%g4]ASI_QUEUE             ! set head equal to tail

        /*
         * Set %g2 to %g6, which is the current head offset. %g2
         * is arg2 of the C routine. %g3 is the tail offset,
         * which is arg3 of the C routine.
         * Call rq_overflow at PIL 14 unless we're already at PIL 15.
         */
        mov     %g6, %g2
        set     rq_overflow, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
        movl    %icc, PIL_14, %g4

0:      retry

        /*NOTREACHED*/
        SET_SIZE(resumable_error)
#endif /* lint */

#if defined(lint)

void
nonresumable_error(void)
{}

#else   /* lint */

/*
 * (TT 0x7f, TL>0) Non-resumable Error Queue Handler
 *      We keep a shadow copy of the queue in kernel buf.
 *      Read the non-resumable queue head and tail offset.
 *      If there are entries on the queue, move them to
 *      the kernel buf, which is next to the non-resumable
 *      queue in memory. Call C routine to process.
 */
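/*
 * Note: the drain loop below mirrors resumable_error above (the same
 * shadow copy scheme, using the CPU_NRQ_* queue offsets instead of
 * CPU_RQ_*). The main difference is in the sys_trap arguments: %g2
 * carries the previous trap level and %g3 carries the packed tail/head
 * offsets, as described before the sys_trap call below.
 */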
        ENTRY_NP(nonresumable_error)
        mov     CPU_NRQ_HD, %g4
        ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset
        mov     CPU_NRQ_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
        mov     %g2, %g6                        ! %g6 = working copy of head; head also saved in %g2

        cmp     %g6, %g3
        be,pn   %xcc, 0f                        ! head == tail
        nop

        CPU_ADDR(%g1, %g4)                      ! %g1 = cpu struct addr

2:      set     CPU_NRQ_BASE_OFF, %g4
        ldx     [%g1 + %g4], %g4                ! %g4 = queue base PA
        add     %g6, %g4, %g4                   ! %g4 = PA of ER in Q
        set     CPU_NRQ_SIZE, %g7
        add     %g4, %g7, %g7                   ! %g7 = PA of ER in kernel buf

        ldxa    [%g7]ASI_MEM, %g5               ! %g5 = first 8 bytes of ER buf
        cmp     0, %g5
        bne,pn  %xcc, 1f                        ! first 8 bytes are not 0
        nop

        /* Now we can move 64 bytes from queue to buf */
        set     0, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 0 - 7
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 8 - 15
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 16 - 23
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 24 - 31
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 32 - 39
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 40 - 47
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 48 - 55
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 56 - 63

        set     CPU_NRQ_SIZE, %g5               ! %g5 = queue size
        sub     %g5, 1, %g5                     ! %g5 = queue size mask

        add     %g6, Q_ENTRY_SIZE, %g6          ! increment q head to next
        and     %g6, %g5, %g6                   ! size mask for wrap around
        cmp     %g6, %g3                        ! head == tail ??

        bne,pn  %xcc, 2b                        ! still have more to process
        nop

        /*
         * head equals tail now, we can update the queue head
         * and call sys_trap
         */
        mov     CPU_NRQ_HD, %g4
        stxa    %g6, [%g4]ASI_QUEUE             ! update head offset

        /*
         * Call sys_trap. %g2 is TL (arg2), %g3 is the packed head and
         * tail offsets (arg3).
         * %g3 looks like the following:
         *
         *      +--------------------+--------------------+
         *      |     tail offset    |     head offset     |
         *      +--------------------+--------------------+
         *      63                 32 31                   0
         *
         * Run at PIL 14 unless we're already at PIL 15.
         */
        sllx    %g3, 32, %g3                    ! %g3.h = tail offset
        or      %g3, %g2, %g3                   ! %g3.l = head offset
        rdpr    %tl, %g2                        ! %g2 = current tl
        sub     %g2, 1, %g2                     ! %g2 = previous tl, arg2

        set     process_nonresumable_error, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
        movl    %icc, PIL_14, %g4

        /*
         * We are here because the C routine is not able to process
         * errors in time. So the first 8 bytes of ER in buf have not
         * been cleared. We set head equal to tail, dropping the pending
         * reports, and retry.
         */

1:      mov     CPU_NRQ_HD, %g4
        stxa    %g3, [%g4]ASI_QUEUE             ! set head equal to tail

0:      retry

        /*NOTREACHED*/
        SET_SIZE(nonresumable_error)
#endif /* lint */