/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Driver to retire/unretire L2/L3 cachelines on panther
 */
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>

extern int retire_l2(uint64_t, uint64_t);
extern int retire_l2_alternate(uint64_t, uint64_t);
extern int unretire_l2(uint64_t, uint64_t);
extern int unretire_l2_alternate(uint64_t, uint64_t);
extern int retire_l3(uint64_t, uint64_t);
extern int retire_l3_alternate(uint64_t, uint64_t);
extern int unretire_l3(uint64_t, uint64_t);
extern int unretire_l3_alternate(uint64_t, uint64_t);

extern void retire_l2_start(uint64_t, uint64_t);
extern void retire_l2_end(uint64_t, uint64_t);
extern void unretire_l2_start(uint64_t, uint64_t);
extern void unretire_l2_end(uint64_t, uint64_t);
extern void retire_l3_start(uint64_t, uint64_t);
extern void retire_l3_end(uint64_t, uint64_t);
extern void unretire_l3_start(uint64_t, uint64_t);
extern void unretire_l3_end(uint64_t, uint64_t);

extern void get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern void get_l2_tag_tl1(uint64_t, uint64_t);
extern void get_l3_tag_tl1(uint64_t, uint64_t);
extern const int _ncpu;

/* Macro for putting 64-bit onto stack as two 32-bit ints */
#define	PRTF_64_TO_32(x)	(uint32_t)((x)>>32), (uint32_t)(x)
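/*
 * Example usage (see print_l2_tag() below): a 64-bit value expands to two
 * arguments and is printed with a pair of %08x conversions, e.g.
 *	cmn_err(CE_CONT, "PA=0x%08x.%08x\n", PRTF_64_TO_32(l2_subaddr));
 */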


uint_t l2_flush_retries_done = 0;
int mem_cache_debug = 0x0;
uint64_t pattern = 0;
uint32_t retire_failures = 0;
#ifdef DEBUG
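/*
 * Bookkeeping for the most recently injected L2/L3 tag error.  The
 * MEM_CACHE_READ_ERROR_INJECTED_TAGS ioctl below uses the way and bit
 * recorded here to flip the corresponding bit(s) in the tag data it
 * returns to the caller.
 */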
int inject_anonymous_tag_error = 0;
int32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
int32_t last_l3tag_error_injected_way;
uint8_t last_l3tag_error_injected_bit;
int32_t last_l2tag_error_injected_way;
uint8_t last_l2tag_error_injected_bit;
#endif

/* dev_ops and cb_ops entry point function declarations */
static int mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
static int mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
static int mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
    void **);
static int mem_cache_open(dev_t *, int, int, cred_t *);
static int mem_cache_close(dev_t, int, int, cred_t *);
static int mem_cache_ioctl_ops(int, int, cache_info_t *);
static int mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,
	mem_cache_close,
	nodev,
	nodev,
	nodev,			/* dump */
	nodev,
	nodev,
	mem_cache_ioctl,
	nodev,			/* devmap */
	nodev,
	ddi_segmap,		/* segmap */
	nochpoll,
	ddi_prop_op,
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};

static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	mem_cache_getinfo,
	nulldev,
	nulldev,		/* probe */
	mem_cache_attach,
	mem_cache_detach,
	nulldev,		/* reset */
	&mem_cache_cb_ops,
	(struct bus_ops *)NULL,
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * Soft state
 */
struct mem_cache_softc {
	dev_info_t	*dip;
	kmutex_t	mutex;
};
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))

/* module configuration stuff */
static void *statep;
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	"mem_cache_driver (08/01/30) ",
	&mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};

extern const int _ncpu; /* Pull the kernel's global _ncpu definition */

int
_init(void)
{
	int e;

	if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
	    MAX_MEM_CACHE_INSTANCES)) {
		return (e);
	}

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&statep);

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&statep);

	return (DDI_SUCCESS);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int inst;
	int retval = DDI_SUCCESS;
	struct mem_cache_softc *softc;

	inst = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((softc = getsoftc(inst)) == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else
			*result = (void *)softc->dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)((uintptr_t)inst);
		break;

	default:
		retval = DDI_FAILURE;
	}

	return (retval);
}

static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc = NULL;
	char name[80];

	switch (cmd) {
	case DDI_ATTACH:
		inst = ddi_get_instance(dip);
		if (inst >= MAX_MEM_CACHE_INSTANCES) {
			cmn_err(CE_WARN, "attach failed, too many instances\n");
			return (DDI_FAILURE);
		}
		(void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
		if (ddi_create_priv_minor_node(dip, name,
		    S_IFCHR,
		    inst,
		    DDI_PSEUDO,
		    0, NULL, "all", 0640) ==
		    DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		/* Allocate a soft state structure for this instance */
		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
			cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
			    "for inst %d\n", inst);
			goto attach_failed;
		}

		/* Setup soft state */
		softc = getsoftc(inst);
		softc->dip = dip;
		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

		/* Create main environmental node */
		ddi_report_dev(dip);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:

	/* Free soft state, if allocated. remove minor node if added earlier */
	if (softc)
		ddi_soft_state_free(statep, inst);

	ddi_remove_minor_node(dip, NULL);

	return (DDI_FAILURE);
}

static int
mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc;

	switch (cmd) {
	case DDI_DETACH:
		inst = ddi_get_instance(dip);
		if ((softc = getsoftc(inst)) == NULL)
			return (ENXIO);

		/* Free the soft state and remove minor node added earlier */
		mutex_destroy(&softc->mutex);
		ddi_soft_state_free(statep, inst);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*ARGSUSED*/
static int
mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int inst = getminor(*devp);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

/*ARGSUSED*/
static int
mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int inst = getminor(dev);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

static char *tstate_to_desc[] = {
	"Invalid",		/* 0 */
	"Shared",		/* 1 */
	"Exclusive",		/* 2 */
	"Owner",		/* 3 */
	"Modified",		/* 4 */
	"NA",			/* 5 */
	"Owner/Shared",		/* 6 */
	"Reserved(7)",		/* 7 */
};

static char *
tag_state_to_desc(uint8_t tagstate)
{
	return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}

void
print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
{
	uint64_t l2_subaddr;
	uint8_t l2_state;

	l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
	l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);

	l2_state = (l2_tag & CH_ECSTATE_MASK);
	cmn_err(CE_CONT,
	    "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
	    PRTF_64_TO_32(l2_subaddr),
	    PRTF_64_TO_32(l2_tag),
	    tag_state_to_desc(l2_state));
}

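/*
 * Dump the tag, state and captured subblock data for every L2 way
 * recorded in the cpu logout area.
 */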
void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;
	int i, offset;
	uint8_t way, l2_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	} /* end of for way loop */
}

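/*
 * Same as print_l2cache_line(), but for the L3 (external) cache data
 * captured in the cpu logout area.
 */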
void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;
	int i, offset;
	uint8_t way, ec_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr =
			    &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}

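/*
 * Check whether the physical address of the retire/unretire routine's own
 * text maps to the same L2/L3 index bits (within a 0x100 guard band on
 * either side) as the target tag address.  If it does, the caller uses the
 * _alternate copy of the routine so the operation does not land on the
 * cache index it is modifying.
 */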
static boolean_t
tag_addr_collides(uint64_t tag_addr, cache_id_t type,
    retire_func_t start_of_func, retire_func_t end_of_func)
{
	uint64_t start_paddr, end_paddr;
	char *type_str;

	start_paddr = va_to_pa((void *)start_of_func);
	end_paddr = va_to_pa((void *)end_of_func);
	switch (type) {
	case L2_CACHE_TAG:
	case L2_CACHE_DATA:
		tag_addr &= PN_L2_INDEX_MASK;
		start_paddr &= PN_L2_INDEX_MASK;
		end_paddr &= PN_L2_INDEX_MASK;
		type_str = "L2:";
		break;
	case L3_CACHE_TAG:
	case L3_CACHE_DATA:
		tag_addr &= PN_L3_TAG_RD_MASK;
		start_paddr &= PN_L3_TAG_RD_MASK;
		end_paddr &= PN_L3_TAG_RD_MASK;
		type_str = "L3:";
		break;
	default:
		/*
		 * Should never reach here.
		 */
		ASSERT(0);
		return (B_FALSE);
	}
	if ((tag_addr > (start_paddr - 0x100)) &&
	    (tag_addr < (end_paddr + 0x100))) {
		if (mem_cache_debug & 0x1)
			cmn_err(CE_CONT,
			    "%s collision detected tag_addr = 0x%08x"
			    " start_paddr = 0x%08x end_paddr = 0x%08x\n",
			    type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
			    (uint32_t)end_paddr);
		return (B_TRUE);
	} else
		return (B_FALSE);
}

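/*
 * Build the diagnostic tag access address for the requested cacheline:
 * the index is shifted into place by PN_CACHE_LINE_SHIFT, the way is
 * shifted into the L2/L3 way field, and the hardware ECC enable bit for
 * the corresponding cache is set.
 */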
static uint64_t
get_tag_addr(cache_info_t *cache_info)
{
	uint64_t tag_addr, scratch;

	switch (cache_info->cache) {
	case L2_CACHE_TAG:
	case L2_CACHE_DATA:
		tag_addr = (uint64_t)(cache_info->index <<
		    PN_CACHE_LINE_SHIFT);
		scratch = (uint64_t)(cache_info->way <<
		    PN_L2_WAY_SHIFT);
		tag_addr |= scratch;
		tag_addr |= PN_L2_IDX_HW_ECC_EN;
		break;
	case L3_CACHE_TAG:
	case L3_CACHE_DATA:
		tag_addr = (uint64_t)(cache_info->index <<
		    PN_CACHE_LINE_SHIFT);
		scratch = (uint64_t)(cache_info->way <<
		    PN_L3_WAY_SHIFT);
		tag_addr |= scratch;
		tag_addr |= PN_L3_IDX_HW_ECC_EN;
		break;
	default:
		/*
		 * Should never reach here.
		 */
		ASSERT(0);
		return (uint64_t)(0);
	}
	return (tag_addr);
}

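/*
 * Worker routine for the ioctls.  Validates the requested way/index and
 * target CPU, then binds to that CPU and performs the retire, unretire,
 * or tag read using the diagnostic cross-trap helpers declared above.
 */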
static int
mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
{
	int ret_val = 0;
	uint64_t afar, tag_addr;
	ch_cpu_logout_t clop;
	uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
	int i, retire_retry_count;
	cpu_t *cpu;
	uint64_t tag_data;
	uint8_t state;

	if (cache_info->way >= PN_CACHE_NWAYS)
		return (EINVAL);
	switch (cache_info->cache) {
	case L2_CACHE_TAG:
	case L2_CACHE_DATA:
		if (cache_info->index >=
		    (PN_L2_SET_SIZE/PN_L2_LINESIZE))
			return (EINVAL);
		break;
	case L3_CACHE_TAG:
	case L3_CACHE_DATA:
		if (cache_info->index >=
		    (PN_L3_SET_SIZE/PN_L3_LINESIZE))
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}
	/*
	 * Check if we have a valid cpu ID and that
	 * CPU is ONLINE.
	 */
	mutex_enter(&cpu_lock);
	cpu = cpu_get(cache_info->cpu_id);
	if ((cpu == NULL) || (!cpu_is_online(cpu))) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}
	mutex_exit(&cpu_lock);
	pattern = 0;	/* default value of TAG PA when cacheline is retired. */
	switch (cmd) {
	case MEM_CACHE_RETIRE:
		tag_addr = get_tag_addr(cache_info);
		pattern |= PN_ECSTATE_NA;
		retire_retry_count = 0;
		affinity_set(cache_info->cpu_id);
		switch (cache_info->cache) {
		case L2_CACHE_DATA:
		case L2_CACHE_TAG:
			if ((cache_info->bit & MSB_BIT_MASK) ==
			    MSB_BIT_MASK)
				pattern |= PN_L2TAG_PA_MASK;
retry_l2_retire:
			if (tag_addr_collides(tag_addr,
			    cache_info->cache,
			    retire_l2_start, retire_l2_end))
				ret_val =
				    retire_l2_alternate(
				    tag_addr, pattern);
			else
				ret_val = retire_l2(tag_addr,
				    pattern);
			if (ret_val == 1) {
				/*
				 * Cacheline was in the retired
				 * state already, so return success.
				 */
				ret_val = 0;
			}
			if (ret_val < 0) {
				cmn_err(CE_WARN,
				    "retire_l2() failed. index = 0x%x way %d. Retrying...\n",
				    cache_info->index,
				    cache_info->way);
				if (retire_retry_count >= 2) {
					retire_failures++;
					affinity_clear();
					return (EIO);
				}
				retire_retry_count++;
				goto retry_l2_retire;
			}
			if (ret_val == 2)
				l2_flush_retries_done++;
			/*
			 * We bind ourself to a CPU and send cross trap to
			 * ourself. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do a xt_sync to make sure that the CPU has completed
			 * the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l2_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				retire_failures++;
				print_l2_tag(tag_addr,
				    tag_data);
				cmn_err(CE_WARN,
				    "L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
				    cache_info->index,
				    cache_info->way);
				if (retire_retry_count >= 2) {
					retire_failures++;
					affinity_clear();
					return (EIO);
				}
				retire_retry_count++;
				goto retry_l2_retire;
			}
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			if ((cache_info->bit & MSB_BIT_MASK) ==
			    MSB_BIT_MASK)
				pattern |= PN_L3TAG_PA_MASK;
			if (tag_addr_collides(tag_addr,
			    cache_info->cache,
			    retire_l3_start, retire_l3_end))
				ret_val =
				    retire_l3_alternate(
				    tag_addr, pattern);
			else
				ret_val = retire_l3(tag_addr,
				    pattern);
			if (ret_val == 1) {
				/*
				 * Cacheline was in the retired
				 * state already, so return success.
				 */
				ret_val = 0;
			}
			if (ret_val < 0) {
				cmn_err(CE_WARN,
				    "retire_l3() failed. ret_val = %d index = 0x%x\n",
				    ret_val,
				    cache_info->index);
				retire_failures++;
				affinity_clear();
				return (EIO);
			}
			/*
			 * We bind ourself to a CPU and send cross trap to
			 * ourself. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do a xt_sync to make sure that the CPU has completed
			 * the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l3_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				cmn_err(CE_WARN,
				    "L3 RETIRE failed for index 0x%x\n",
				    cache_info->index);
				retire_failures++;
				affinity_clear();
				return (EIO);
			}

			break;
		}
		affinity_clear();
		break;
	case MEM_CACHE_UNRETIRE:
		tag_addr = get_tag_addr(cache_info);
		pattern = PN_ECSTATE_INV;
		affinity_set(cache_info->cpu_id);
		switch (cache_info->cache) {
		case L2_CACHE_DATA:
		case L2_CACHE_TAG:
			/*
			 * We bind ourself to a CPU and send cross trap to
			 * ourself. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do a xt_sync to make sure that the CPU has completed
			 * the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l2_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				affinity_clear();
				return (EINVAL);
			}
			if (tag_addr_collides(tag_addr,
			    cache_info->cache,
			    unretire_l2_start, unretire_l2_end))
				ret_val =
				    unretire_l2_alternate(
				    tag_addr, pattern);
			else
				ret_val =
				    unretire_l2(tag_addr,
				    pattern);
			if (ret_val != 0) {
				cmn_err(CE_WARN,
				    "unretire_l2() failed. ret_val = %d index = 0x%x\n",
				    ret_val,
				    cache_info->index);
				retire_failures++;
				affinity_clear();
				return (EIO);
			}
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			/*
			 * We bind ourself to a CPU and send cross trap to
			 * ourself. On return from xt_one we can rely on the
			 * data in tag_data being filled in. Normally one would
			 * do a xt_sync to make sure that the CPU has completed
			 * the cross trap call xt_one.
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_l3_tag_tl1),
			    tag_addr, (uint64_t)(&tag_data));
			state = tag_data & CH_ECSTATE_MASK;
			if (state != PN_ECSTATE_NA) {
				affinity_clear();
				return (EINVAL);
			}
			if (tag_addr_collides(tag_addr,
			    cache_info->cache,
			    unretire_l3_start, unretire_l3_end))
				ret_val =
				    unretire_l3_alternate(
				    tag_addr, pattern);
			else
				ret_val =
				    unretire_l3(tag_addr,
				    pattern);
			if (ret_val != 0) {
				cmn_err(CE_WARN,
				    "unretire_l3() failed. ret_val = %d index = 0x%x\n",
				    ret_val,
				    cache_info->index);
				affinity_clear();
				return (EIO);
			}
			break;
		}
		affinity_clear();
		break;
	case MEM_CACHE_ISRETIRED:
	case MEM_CACHE_STATE:
		return (ENOTSUP);
	case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
	case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
		/*
		 * Read tag and data for all the ways at a given afar
		 */
		afar = (uint64_t)(cache_info->index
		    << PN_CACHE_LINE_SHIFT);
		mutex_enter(&cpu_lock);
		affinity_set(cache_info->cpu_id);
		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);
		/*
		 * We bind ourself to a CPU and send cross trap to
		 * ourself. On return from xt_one we can rely on the
		 * data in clop being filled in. Normally one would
		 * do a xt_sync to make sure that the CPU has completed
		 * the cross trap call xt_one.
		 */
		xt_one(cache_info->cpu_id,
		    (xcfunc_t *)(get_ecache_dtags_tl1),
		    afar, (uint64_t)(&clop));
		mutex_enter(&cpu_lock);
		(void) start_cpus();
		mutex_exit(&cpu_lock);
		affinity_clear();
		switch (cache_info->cache) {
		case L2_CACHE_TAG:
			for (i = 0; i < PN_CACHE_NWAYS; i++) {
				Lxcache_tag_data[i] =
				    clop.clo_data.chd_l2_data
				    [i].ec_tag;
			}
#ifdef DEBUG
			last_error_injected_bit =
			    last_l2tag_error_injected_bit;
			last_error_injected_way =
			    last_l2tag_error_injected_way;
#endif
			break;
		case L3_CACHE_TAG:
			for (i = 0; i < PN_CACHE_NWAYS; i++) {
				Lxcache_tag_data[i] =
				    clop.clo_data.chd_ec_data
				    [i].ec_tag;
			}
#ifdef DEBUG
			last_error_injected_bit =
			    last_l3tag_error_injected_bit;
			last_error_injected_way =
			    last_l3tag_error_injected_way;
#endif
			break;
		default:
			return (ENOTSUP);
		}	/* end of switch(cache) */
#ifdef DEBUG
		if ((cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) &&
		    (inject_anonymous_tag_error == 0) &&
		    (last_error_injected_way >= 0) &&
		    (last_error_injected_way <= 3)) {
			pattern = ((uint64_t)1 <<
			    last_error_injected_bit);
			/*
			 * If error bit is ECC we need to make sure
			 * ECC on all WAYS are corrupted.
			 */
			if ((last_error_injected_bit >= 6) &&
			    (last_error_injected_bit <= 14)) {
				for (i = 0; i < PN_CACHE_NWAYS; i++)
					Lxcache_tag_data[i] ^=
					    pattern;
			} else
				Lxcache_tag_data
				    [last_error_injected_way] ^=
				    pattern;
		}
#endif
		if (ddi_copyout((caddr_t)Lxcache_tag_data,
		    (caddr_t)cache_info->datap,
		    sizeof (Lxcache_tag_data), mode)
		    != DDI_SUCCESS) {
			return (EFAULT);
		}
		break;	/* end of READ_TAGS */
	default:
		return (ENOTSUP);
	}	/* end of switch(cmd) */
	return (ret_val);
}

/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int inst;
	struct mem_cache_softc *softc;
	cache_info_t cache_info;
	cache_info32_t cache_info32;
	int ret_val;
	int is_panther;

	inst = getminor(dev);
	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
			mutex_exit(&softc->mutex);
			return (EFAULT);
		}
		cache_info.cache = cache_info32.cache;
		cache_info.index = cache_info32.index;
		cache_info.way = cache_info32.way;
		cache_info.cpu_id = cache_info32.cpu_id;
		cache_info.bit = cache_info32.bit;
		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
	} else
#endif
	if (ddi_copyin((cache_info_t *)arg, &cache_info,
	    sizeof (cache_info), mode) != DDI_SUCCESS) {
		mutex_exit(&softc->mutex);
		return (EFAULT);
	}

	if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
		mutex_exit(&softc->mutex);
		return (EINVAL);
	}
	is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
	if (!is_panther) {
		mutex_exit(&softc->mutex);
		return (ENOTSUP);
	}
	switch (cmd) {
	case MEM_CACHE_RETIRE:
	case MEM_CACHE_UNRETIRE:
		if ((mode & FWRITE) == 0) {
			ret_val = EBADF;
			break;
		}
		/*FALLTHROUGH*/
	case MEM_CACHE_ISRETIRED:
	case MEM_CACHE_STATE:
	case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
	case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
		ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
		break;
	default:
		ret_val = ENOTSUP;
		break;
	}
	mutex_exit(&softc->mutex);
	return (ret_val);
}