/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Driver to retire/unretire L2/L3 cachelines on panther
 */
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>

extern int	retire_l2(uint64_t, uint64_t);
extern int	retire_l2_alternate(uint64_t, uint64_t);
extern int	unretire_l2(uint64_t, uint64_t);
extern int	unretire_l2_alternate(uint64_t, uint64_t);
extern int	retire_l3(uint64_t, uint64_t);
extern int	retire_l3_alternate(uint64_t, uint64_t);
extern int	unretire_l3(uint64_t, uint64_t);
extern int	unretire_l3_alternate(uint64_t, uint64_t);

extern void	retire_l2_start(uint64_t, uint64_t);
extern void	retire_l2_end(uint64_t, uint64_t);
extern void	unretire_l2_start(uint64_t, uint64_t);
extern void	unretire_l2_end(uint64_t, uint64_t);
extern void	retire_l3_start(uint64_t, uint64_t);
extern void	retire_l3_end(uint64_t, uint64_t);
extern void	unretire_l3_start(uint64_t, uint64_t);
extern void	unretire_l3_end(uint64_t, uint64_t);

extern void	get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern void	get_l2_tag_tl1(uint64_t, uint64_t);
extern void	get_l3_tag_tl1(uint64_t, uint64_t);


/* Macro for putting 64-bit onto stack as two 32-bit ints */
#define	PRTF_64_TO_32(x)	(uint32_t)((x)>>32), (uint32_t)(x)
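
/*
 * For example, print_l2_tag() below pairs this macro with a "%08x.%08x"
 * conversion so that a 64-bit physical address or tag value prints as its
 * upper and lower 32-bit halves:
 *
 *	cmn_err(CE_CONT, "PA=0x%08x.%08x\n", PRTF_64_TO_32(l2_subaddr));
 */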

uint_t		l2_flush_retries_done = 0;
int		mem_cache_debug = 0x0;
uint64_t	pattern = 0;
uint32_t	retire_failures = 0;
uint32_t	last_error_injected_way = 0;
uint8_t		last_error_injected_bit = 0;
uint32_t	last_l3tag_error_injected_way = 0;
uint8_t		last_l3tag_error_injected_bit = 0;
uint32_t	last_l2tag_error_injected_way = 0;
uint8_t		last_l2tag_error_injected_bit = 0;
uint32_t	last_l3data_error_injected_way = 0;
uint8_t		last_l3data_error_injected_bit = 0;
uint32_t	last_l2data_error_injected_way = 0;
uint8_t		last_l2data_error_injected_bit = 0;

/* dev_ops and cb_ops entry point function declarations */
static int	mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
static int	mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
static int	mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
    void **);
static int	mem_cache_open(dev_t *, int, int, cred_t *);
static int	mem_cache_close(dev_t, int, int, cred_t *);
static int	mem_cache_ioctl_ops(int, int, cache_info_t *);
static int	mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,
	mem_cache_close,
	nodev,
	nodev,
	nodev,			/* dump */
	nodev,
	nodev,
	mem_cache_ioctl,
	nodev,			/* devmap */
	nodev,
	ddi_segmap,		/* segmap */
	nochpoll,
	ddi_prop_op,
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};

static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	mem_cache_getinfo,
	nulldev,
	nulldev,		/* probe */
	mem_cache_attach,
	mem_cache_detach,
	nulldev,		/* reset */
	&mem_cache_cb_ops,
	(struct bus_ops *)NULL,
	nulldev			/* power */
};

/*
 * Soft state
 */
struct mem_cache_softc {
	dev_info_t	*dip;
	kmutex_t	mutex;
};
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))

/* module configuration stuff */
static void *statep;
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	"mem_cache_driver (08/01/30) ",
	&mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};

int
_init(void)
{
	int e;

	if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
	    MAX_MEM_CACHE_INSTANCES)) {
		return (e);
	}

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&statep);

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&statep);

	return (DDI_SUCCESS);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int	inst;
	int	retval = DDI_SUCCESS;
	struct mem_cache_softc *softc;

	inst = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((softc = getsoftc(inst)) == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else
			*result = (void *)softc->dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)((uintptr_t)inst);
		break;

	default:
		retval = DDI_FAILURE;
	}

	return (retval);
}

static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	inst;
	struct mem_cache_softc *softc = NULL;
	char	name[80];

	switch (cmd) {
	case DDI_ATTACH:
		inst = ddi_get_instance(dip);
		if (inst >= MAX_MEM_CACHE_INSTANCES) {
			cmn_err(CE_WARN, "attach failed, too many instances\n");
			return (DDI_FAILURE);
		}
		(void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
		if (ddi_create_priv_minor_node(dip, name, S_IFCHR, inst,
		    DDI_PSEUDO, 0, NULL, "all", 0640) == DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		/* Allocate a soft state structure for this instance */
		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
			cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
			    "for inst %d\n", inst);
			goto attach_failed;
		}

		/* Setup soft state */
		softc = getsoftc(inst);
		softc->dip = dip;
		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

		/* Create main environmental node */
		ddi_report_dev(dip);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:

	/* Free soft state, if allocated. remove minor node if added earlier */
	if (softc)
		ddi_soft_state_free(statep, inst);

	ddi_remove_minor_node(dip, NULL);

	return (DDI_FAILURE);
}

static int
mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	inst;
	struct mem_cache_softc *softc;

	switch (cmd) {
	case DDI_DETACH:
		inst = ddi_get_instance(dip);
		if ((softc = getsoftc(inst)) == NULL)
			return (ENXIO);

		/* Free the soft state and remove minor node added earlier */
		mutex_destroy(&softc->mutex);
		ddi_soft_state_free(statep, inst);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*ARGSUSED*/
static int
mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int	inst = getminor(*devp);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

/*ARGSUSED*/
static int
mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int	inst = getminor(dev);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

static char *tstate_to_desc[] = {
	"Invalid",			/* 0 */
	"Shared",			/* 1 */
	"Exclusive",			/* 2 */
	"Owner",			/* 3 */
	"Modified",			/* 4 */
	"NA",				/* 5 */
	"Owner/Shared",			/* 6 */
	"Reserved(7)",			/* 7 */
};

static char *
tag_state_to_desc(uint8_t tagstate)
{
	return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}
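
/*
 * The "NA" entry (index 5 above) is the encoding that the retire paths in
 * mem_cache_ioctl_ops() write into a tag (via PN_ECSTATE_NA) and later read
 * back to confirm that a line has actually been retired.
 */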

void
print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
{
	uint64_t l2_subaddr;
	uint8_t	l2_state;

	l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
	l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);

	l2_state = (l2_tag & CH_ECSTATE_MASK);
	cmn_err(CE_CONT,
	    "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
	    PRTF_64_TO_32(l2_subaddr),
	    PRTF_64_TO_32(l2_tag),
	    tag_state_to_desc(l2_state));
}

void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;
	int	i, offset;
	uint8_t	way, l2_state;
	ch_ec_data_t *ecp;

	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}	/* end of for way loop */
}

void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;
	int	i, offset;
	uint8_t	way, ec_state;
	ch_ec_data_t *ecp;

	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}
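
/*
 * As the two dump loops above show, each ec_data_elm_t captured in the
 * logout area carries 32 bytes of line data as four 8-byte words
 * (ec_d8[0..3]) plus an ec_eccd word holding two 9-bit ECC values:
 * bits <17:9> cover the first 16-byte chunk (ec_d8[0], ec_d8[1]) and
 * bits <8:0> cover the second chunk (ec_d8[2], ec_d8[3]).
 */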

static boolean_t
tag_addr_collides(uint64_t tag_addr, cache_id_t type,
    retire_func_t start_of_func, retire_func_t end_of_func)
{
	uint64_t start_paddr, end_paddr;
	char	*type_str;

	start_paddr = va_to_pa((void *)start_of_func);
	end_paddr = va_to_pa((void *)end_of_func);
	switch (type) {
	case L2_CACHE_TAG:
	case L2_CACHE_DATA:
		tag_addr &= PN_L2_INDEX_MASK;
		start_paddr &= PN_L2_INDEX_MASK;
		end_paddr &= PN_L2_INDEX_MASK;
		type_str = "L2:";
		break;
	case L3_CACHE_TAG:
	case L3_CACHE_DATA:
		tag_addr &= PN_L3_TAG_RD_MASK;
		start_paddr &= PN_L3_TAG_RD_MASK;
		end_paddr &= PN_L3_TAG_RD_MASK;
		type_str = "L3:";
		break;
	default:
		/*
		 * Should never reach here.
		 */
		ASSERT(0);
		return (B_FALSE);
	}
	if ((tag_addr > (start_paddr - 0x100)) &&
	    (tag_addr < (end_paddr + 0x100))) {
		if (mem_cache_debug & 0x1)
			cmn_err(CE_CONT,
			    "%s collision detected tag_addr = 0x%08x"
			    " start_paddr = 0x%08x end_paddr = 0x%08x\n",
			    type_str, (uint32_t)tag_addr,
			    (uint32_t)start_paddr, (uint32_t)end_paddr);
		return (B_TRUE);
	} else
		return (B_FALSE);
}
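
/*
 * The callers below use this check to pick between the primary and
 * "alternate" retire/unretire routines: if the target index falls within a
 * 0x100-byte window of the cache index range occupied by the routine's own
 * text (retire_l2_start/retire_l2_end and friends), the alternate copy is
 * used instead, presumably so the routine never manipulates the very cache
 * index that its own instructions map to.
 */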

static uint64_t
get_tag_addr(cache_info_t *cache_info)
{
	uint64_t tag_addr, scratch;

	switch (cache_info->cache) {
	case L2_CACHE_TAG:
	case L2_CACHE_DATA:
		tag_addr = (uint64_t)(cache_info->index <<
		    PN_CACHE_LINE_SHIFT);
		scratch = (uint64_t)(cache_info->way <<
		    PN_L2_WAY_SHIFT);
		tag_addr |= scratch;
		tag_addr |= PN_L2_IDX_HW_ECC_EN;
		break;
	case L3_CACHE_TAG:
	case L3_CACHE_DATA:
		tag_addr = (uint64_t)(cache_info->index <<
		    PN_CACHE_LINE_SHIFT);
		scratch = (uint64_t)(cache_info->way <<
		    PN_L3_WAY_SHIFT);
		tag_addr |= scratch;
		tag_addr |= PN_L3_IDX_HW_ECC_EN;
		break;
	default:
		/*
		 * Should never reach here.
		 */
		ASSERT(0);
		return (uint64_t)(0);
	}
	return (tag_addr);
}
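
/*
 * The value built above is the diagnostic-access address handed to the
 * retire/unretire and tag-read routines: the requested line index goes in
 * the index field (index << PN_CACHE_LINE_SHIFT), the way number in the
 * way-select field (way << PN_L2_WAY_SHIFT or PN_L3_WAY_SHIFT), and the
 * PN_Lx_IDX_HW_ECC_EN bit, as its name suggests, keeps hardware ECC
 * generation enabled for the access.
 */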

static int
mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
{
	int	ret_val = 0;
	uint64_t afar, tag_addr;
	ch_cpu_logout_t clop;
	uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
	int	i, retire_retry_count;
	cpu_t	*cpu;
	uint64_t tag_data;
	uint8_t	state;

	switch (cache_info->cache) {
	case L2_CACHE_TAG:
	case L2_CACHE_DATA:
		if (cache_info->way >= PN_CACHE_NWAYS)
			return (EINVAL);
		if (cache_info->index >=
		    (PN_L2_SET_SIZE/PN_L2_LINESIZE))
			return (EINVAL);
		break;
	case L3_CACHE_TAG:
	case L3_CACHE_DATA:
		if (cache_info->way >= PN_CACHE_NWAYS)
			return (EINVAL);
		if (cache_info->index >=
		    (PN_L3_SET_SIZE/PN_L3_LINESIZE))
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}
	/*
	 * Check that we have a valid cpu ID and that
	 * the CPU is ONLINE.
	 */
	mutex_enter(&cpu_lock);
	cpu = cpu_get(cache_info->cpu_id);
	if ((cpu == NULL) || (!cpu_is_online(cpu))) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}
	mutex_exit(&cpu_lock);
	switch (cmd) {
	case MEM_CACHE_RETIRE:
		if ((cache_info->bit & MSB_BIT_MASK) == MSB_BIT_MASK) {
			pattern = ((uint64_t)1 <<
			    (cache_info->bit & TAG_BIT_MASK));
		} else {
			pattern = 0;
		}
		tag_addr = get_tag_addr(cache_info);
		pattern |= PN_ECSTATE_NA;
		retire_retry_count = 0;
		affinity_set(cache_info->cpu_id);
		switch (cache_info->cache) {
		case L2_CACHE_DATA:
		case L2_CACHE_TAG:
retry_l2_retire:
			if (tag_addr_collides(tag_addr, cache_info->cache,
			    retire_l2_start, retire_l2_end))
				ret_val = retire_l2_alternate(tag_addr,
				    pattern);
			else
				ret_val = retire_l2(tag_addr, pattern);
			if (ret_val == 1) {
				/*
				 * Cacheline was in retired state already,
				 * so return success.
				 */
				ret_val = 0;
			}
			if (ret_val < 0) {
				cmn_err(CE_WARN,
				    "retire_l2() failed. index = 0x%x way %d."
				    " Retrying...\n",
				    cache_info->index, cache_info->way);
				if (retire_retry_count >= 2) {
					retire_failures++;
					affinity_clear();
					return (EIO);
				}
				retire_retry_count++;
				goto retry_l2_retire;
			}
			if (ret_val == 2)
				l2_flush_retries_done++;
			/*
			 * We bind ourselves to a CPU and send a cross trap to
			 * ourselves. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do an xt_sync to make sure that the CPU has
			 * completed the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l2_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				retire_failures++;
				print_l2_tag(tag_addr, tag_data);
				cmn_err(CE_WARN,
				    "L2 RETIRE:failed for index 0x%x way %d."
				    " Retrying...\n",
				    cache_info->index, cache_info->way);
				if (retire_retry_count >= 2) {
					retire_failures++;
					affinity_clear();
					return (EIO);
				}
				retire_retry_count++;
				goto retry_l2_retire;
			}
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			if (tag_addr_collides(tag_addr, cache_info->cache,
			    retire_l3_start, retire_l3_end))
				ret_val = retire_l3_alternate(tag_addr,
				    pattern);
			else
				ret_val = retire_l3(tag_addr, pattern);
			if (ret_val == 1) {
				/*
				 * Cacheline was in retired state already,
				 * so return success.
				 */
				ret_val = 0;
			}
			if (ret_val < 0) {
				cmn_err(CE_WARN,
				    "retire_l3() failed. ret_val = %d"
				    " index = 0x%x\n",
				    ret_val, cache_info->index);
				retire_failures++;
				affinity_clear();
				return (EIO);
			}
			/*
			 * We bind ourselves to a CPU and send a cross trap to
			 * ourselves. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do an xt_sync to make sure that the CPU has
			 * completed the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l3_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				cmn_err(CE_WARN,
				    "L3 RETIRE failed for index 0x%x\n",
				    cache_info->index);
				retire_failures++;
				affinity_clear();
				return (EIO);
			}

			break;
		}
		affinity_clear();
		break;
	case MEM_CACHE_UNRETIRE:
		tag_addr = get_tag_addr(cache_info);
		pattern = PN_ECSTATE_INV;
		affinity_set(cache_info->cpu_id);
		switch (cache_info->cache) {
		case L2_CACHE_DATA:
		case L2_CACHE_TAG:
			/*
			 * Check if the index/way is in NA state
			 */
			/*
			 * We bind ourselves to a CPU and send a cross trap to
			 * ourselves. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do an xt_sync to make sure that the CPU has
			 * completed the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l2_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				affinity_clear();
				return (EINVAL);
			}
			if (tag_addr_collides(tag_addr, cache_info->cache,
			    unretire_l2_start, unretire_l2_end))
				ret_val = unretire_l2_alternate(tag_addr,
				    pattern);
			else
				ret_val = unretire_l2(tag_addr, pattern);
			if (ret_val != 0) {
				cmn_err(CE_WARN,
				    "unretire_l2() failed. ret_val = %d"
				    " index = 0x%x\n",
				    ret_val, cache_info->index);
				retire_failures++;
				affinity_clear();
				return (EIO);
			}
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			/*
			 * Check if the index/way is in NA state
			 */
			/*
			 * We bind ourselves to a CPU and send a cross trap to
			 * ourselves. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do an xt_sync to make sure that the CPU has
			 * completed the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l3_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				affinity_clear();
				return (EINVAL);
			}
			if (tag_addr_collides(tag_addr, cache_info->cache,
			    unretire_l3_start, unretire_l3_end))
				ret_val = unretire_l3_alternate(tag_addr,
				    pattern);
			else
				ret_val = unretire_l3(tag_addr, pattern);
			if (ret_val != 0) {
				cmn_err(CE_WARN,
				    "unretire_l3() failed. ret_val = %d"
				    " index = 0x%x\n",
				    ret_val, cache_info->index);
				affinity_clear();
				return (EIO);
			}
			break;
		}
		affinity_clear();
		break;
	case MEM_CACHE_ISRETIRED:
	case MEM_CACHE_STATE:
		return (ENOTSUP);
	case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
	case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
		/*
		 * Read tag and data for all the ways at a given afar
		 */
		afar = (uint64_t)(cache_info->index << PN_CACHE_LINE_SHIFT);
		affinity_set(cache_info->cpu_id);
		/*
		 * We bind ourselves to a CPU and send a cross trap to
		 * ourselves. On return from xt_one we can rely on the
		 * data in clop being filled in. Normally one would
		 * do an xt_sync to make sure that the CPU has completed
		 * the cross trap call xt_one.
		 */
		xt_one(cache_info->cpu_id,
		    (xcfunc_t *)(get_ecache_dtags_tl1),
		    afar, (uint64_t)(&clop));
		switch (cache_info->cache) {
		case L2_CACHE_TAG:
			for (i = 0; i < PN_CACHE_NWAYS; i++) {
				Lxcache_tag_data[i] =
				    clop.clo_data.chd_l2_data[i].ec_tag;
			}
			last_error_injected_bit =
			    last_l2tag_error_injected_bit;
			last_error_injected_way =
			    last_l2tag_error_injected_way;
			break;
		case L3_CACHE_TAG:
			for (i = 0; i < PN_CACHE_NWAYS; i++) {
				Lxcache_tag_data[i] =
				    clop.clo_data.chd_ec_data[i].ec_tag;
			}
			last_error_injected_bit =
			    last_l3tag_error_injected_bit;
			last_error_injected_way =
			    last_l3tag_error_injected_way;
			break;
		default:
			affinity_clear();
			return (ENOTSUP);
		}	/* end of switch (cache) */
#ifdef DEBUG
		if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
			pattern = ((uint64_t)1 << last_error_injected_bit);
			/*
			 * If the error bit is an ECC bit we need to make sure
			 * the ECC on all WAYS is corrupted.
			 */
			if ((last_error_injected_bit >= 6) &&
			    (last_error_injected_bit <= 14)) {
				for (i = 0; i < PN_CACHE_NWAYS; i++)
					Lxcache_tag_data[i] ^= pattern;
			} else
				Lxcache_tag_data[last_error_injected_way] ^=
				    pattern;
		}
#endif
		if (ddi_copyout((caddr_t)Lxcache_tag_data,
		    (caddr_t)cache_info->datap,
		    sizeof (Lxcache_tag_data), mode) != DDI_SUCCESS) {
			affinity_clear();
			return (EFAULT);
		}
		affinity_clear();
		break;	/* end of READ_TAGS */
	default:
		return (ENOTSUP);
	}	/* end of switch (cmd) */
	return (ret_val);
}

/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int	inst;
	struct mem_cache_softc *softc;
	cache_info_t	cache_info;
	cache_info32_t	cache_info32;
	int	ret_val;
	int	is_panther;

	inst = getminor(dev);
	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
			mutex_exit(&softc->mutex);
			return (EFAULT);
		}
		cache_info.cache = cache_info32.cache;
		cache_info.index = cache_info32.index;
		cache_info.way = cache_info32.way;
		cache_info.cpu_id = cache_info32.cpu_id;
		cache_info.bit = cache_info32.bit;
		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
	} else
#endif
	if (ddi_copyin((cache_info_t *)arg, &cache_info,
	    sizeof (cache_info), mode) != DDI_SUCCESS) {
		mutex_exit(&softc->mutex);
		return (EFAULT);
	}

	if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= NCPU)) {
		mutex_exit(&softc->mutex);
		return (EINVAL);
	}

	is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
	if (!is_panther) {
		mutex_exit(&softc->mutex);
		return (ENOTSUP);
	}
	switch (cmd) {
	case MEM_CACHE_RETIRE:
	case MEM_CACHE_UNRETIRE:
		if ((mode & FWRITE) == 0) {
			ret_val = EBADF;
			break;
		}
		/*FALLTHROUGH*/
	case MEM_CACHE_ISRETIRED:
	case MEM_CACHE_STATE:
	case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
	case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
		ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
		break;
	default:
		ret_val = ENOTSUP;
		break;
	}
	mutex_exit(&softc->mutex);
	return (ret_val);
}
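
/*
 * Usage sketch (illustrative only): a privileged userland consumer is
 * expected to open the minor node created in mem_cache_attach() and issue
 * the ioctls handled above.  The exact device path and the userland-visible
 * header below are assumptions; the structure fields and command names
 * match what this driver consumes.
 *
 *	#include <fcntl.h>
 *	#include <sys/mem_cache.h>	// assumed to export cache_info_t
 *
 *	cache_info_t ci;
 *	uint64_t tags[PN_CACHE_NWAYS];
 *	int fd = open("/devices/pseudo/mem-cache@0:mem_cache0", O_RDWR);
 *
 *	ci.cache = L2_CACHE_TAG;	// which cache array to operate on
 *	ci.index = 0x100;		// line index within the cache
 *	ci.way = 0;			// must be < PN_CACHE_NWAYS;
 *					// READ_TAGS reads all ways anyway
 *	ci.cpu_id = 0;			// CPU whose caches are examined
 *	ci.bit = 0;
 *	ci.datap = tags;		// READ_TAGS copies one tag per way
 *	(void) ioctl(fd, MEM_CACHE_READ_TAGS, &ci);
 *
 * MEM_CACHE_RETIRE and MEM_CACHE_UNRETIRE additionally require the node to
 * have been opened for writing (FWRITE), as checked in mem_cache_ioctl().
 */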