xref: /titanic_50/usr/src/uts/sun4u/io/mem_cache.c (revision 2a8d6eba033e4713ab12b61178f0513f1f075482)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Driver to retire/unretire L2/L3 cachelines on Panther (UltraSPARC IV+)
 */
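/*
 * Illustrative usage sketch (not part of the driver; assumptions noted):
 * a privileged userland consumer opens the minor node created in
 * mem_cache_attach() and issues the ioctls handled below.  The device
 * path shown is an assumption -- the real name is derived from
 * MEM_CACHE_DRIVER_NAME and the instance number.
 *
 *	cache_info_t ci = { 0 };
 *	int fd = open("/devices/pseudo/mem_cache@0:mem_cache0", O_RDWR);
 *
 *	ci.cpu_id = 16;			target Panther CPU id
 *	ci.cache = L2_CACHE_TAG;	operate on the L2 tags
 *	ci.index = 0x100;		cacheline index
 *	ci.way = 2;			way within the set
 *	ci.bit = 0;			no specific tag bit requested
 *	if (ioctl(fd, MEM_CACHE_RETIRE, &ci) != 0)
 *		perror("MEM_CACHE_RETIRE");
 *	(void) close(fd);
 */
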
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>

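/*
 * The retire/unretire primitives and TL1 tag readers below are implemented
 * outside this file (presumably in assembly, since their physical placement
 * matters).  The *_start/*_end symbols bracket each retire/unretire routine;
 * tag_addr_collides() uses them to detect when the target cacheline would
 * collide with the routine's own text, in which case the *_alternate copy
 * of the routine is used instead.
 */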
extern int	retire_l2(uint64_t, uint64_t);
extern int	retire_l2_alternate(uint64_t, uint64_t);
extern int	unretire_l2(uint64_t, uint64_t);
extern int	unretire_l2_alternate(uint64_t, uint64_t);
extern int	retire_l3(uint64_t, uint64_t);
extern int	retire_l3_alternate(uint64_t, uint64_t);
extern int	unretire_l3(uint64_t, uint64_t);
extern int	unretire_l3_alternate(uint64_t, uint64_t);

extern void	retire_l2_start(uint64_t, uint64_t);
extern void	retire_l2_end(uint64_t, uint64_t);
extern void	unretire_l2_start(uint64_t, uint64_t);
extern void	unretire_l2_end(uint64_t, uint64_t);
extern void	retire_l3_start(uint64_t, uint64_t);
extern void	retire_l3_end(uint64_t, uint64_t);
extern void	unretire_l3_start(uint64_t, uint64_t);
extern void	unretire_l3_end(uint64_t, uint64_t);

extern void	get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern void	get_l2_tag_tl1(uint64_t, uint64_t);
extern void	get_l3_tag_tl1(uint64_t, uint64_t);


/* Macro for putting 64-bit onto stack as two 32-bit ints */
#define	PRTF_64_TO_32(x)	(uint32_t)((x)>>32), (uint32_t)(x)


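/*
 * Debug and bookkeeping state.  retire_failures and l2_flush_retries_done
 * count unusual outcomes of the retire path; the last_*_injected_way/bit
 * variables record where the most recent error was injected so that
 * MEM_CACHE_READ_ERROR_INJECTED_TAGS (DEBUG only) can fold the injected
 * bit back into the tag data it copies out.
 */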
uint_t l2_flush_retries_done = 0;
int mem_cache_debug = 0x0;
uint64_t pattern = 0;
uint32_t retire_failures = 0;
uint32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
uint32_t last_l3tag_error_injected_way = 0;
uint8_t last_l3tag_error_injected_bit = 0;
uint32_t last_l2tag_error_injected_way = 0;
uint8_t last_l2tag_error_injected_bit = 0;
uint32_t last_l3data_error_injected_way = 0;
uint8_t last_l3data_error_injected_bit = 0;
uint32_t last_l2data_error_injected_way = 0;
uint8_t last_l2data_error_injected_bit = 0;

/* dev_ops and cb_ops entry point function declarations */
static int	mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
static int	mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
static int	mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
				void **);
static int	mem_cache_open(dev_t *, int, int, cred_t *);
static int	mem_cache_close(dev_t, int, int, cred_t *);
static int	mem_cache_ioctl_ops(int, int, cache_info_t *);
static int	mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,
	mem_cache_close,
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mem_cache_ioctl,
	nodev,			/* devmap */
	nodev,			/* mmap */
	ddi_segmap,		/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};

static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	mem_cache_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	mem_cache_attach,
	mem_cache_detach,
	nulldev,		/* reset */
	&mem_cache_cb_ops,
	(struct bus_ops *)NULL,
	nulldev,		/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

/*
 * Soft state
 */
struct mem_cache_softc {
	dev_info_t	*dip;
	kmutex_t	mutex;
};
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))

/* module configuration stuff */
static void *statep;
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	"mem_cache_driver (08/01/30) ",
	&mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};

extern const int _ncpu; /* Pull the kernel's global _ncpu definition */

int
_init(void)
{
	int e;

	if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
	    MAX_MEM_CACHE_INSTANCES)) {
		return (e);
	}

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&statep);

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&statep);

	return (DDI_SUCCESS);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int	inst;
	int	retval = DDI_SUCCESS;
	struct mem_cache_softc *softc;

	inst = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((softc = getsoftc(inst)) == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else
			*result = (void *)softc->dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)((uintptr_t)inst);
		break;

	default:
		retval = DDI_FAILURE;
	}

	return (retval);
}

static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc = NULL;
	char name[80];

	switch (cmd) {
	case DDI_ATTACH:
		inst = ddi_get_instance(dip);
		if (inst >= MAX_MEM_CACHE_INSTANCES) {
			cmn_err(CE_WARN, "attach failed, too many instances\n");
			return (DDI_FAILURE);
		}
		(void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
		if (ddi_create_priv_minor_node(dip, name,
		    S_IFCHR,
		    inst,
		    DDI_PSEUDO,
		    0, NULL, "all", 0640) ==
		    DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		/* Allocate a soft state structure for this instance */
		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
			cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
			    "for inst %d\n", inst);
			goto attach_failed;
		}

		/* Set up the soft state */
		softc = getsoftc(inst);
		softc->dip = dip;
		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

		/* Announce the device */
		ddi_report_dev(dip);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:

	/* Free soft state, if allocated.  Remove minor node added earlier. */
	if (softc)
		ddi_soft_state_free(statep, inst);

	ddi_remove_minor_node(dip, NULL);

	return (DDI_FAILURE);
}

static int
mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc;

	switch (cmd) {
	case DDI_DETACH:
		inst = ddi_get_instance(dip);
		if ((softc = getsoftc(inst)) == NULL)
			return (ENXIO);

		/* Free the soft state and remove minor node added earlier */
		mutex_destroy(&softc->mutex);
		ddi_soft_state_free(statep, inst);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*ARGSUSED*/
static int
mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int	inst = getminor(*devp);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

/*ARGSUSED*/
static int
mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int	inst = getminor(dev);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

static char *tstate_to_desc[] = {
	"Invalid",			/* 0 */
	"Shared",			/* 1 */
	"Exclusive",			/* 2 */
	"Owner",			/* 3 */
	"Modified",			/* 4 */
	"NA",				/* 5 */
	"Owner/Shared",			/* 6 */
	"Reserved(7)",			/* 7 */
};

static char *
tag_state_to_desc(uint8_t tagstate)
{
	return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}

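/*
 * Pretty-printers for cmn_err(): decode a Panther L2/L3 tag into the
 * physical address it covers and its cacheline state, and dump the
 * captured data and ECC for every way of the set.  PRTF_64_TO_32()
 * splits each 64-bit value so it can be printed as two 32-bit halves.
 */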
void
print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
{
	uint64_t l2_subaddr;
	uint8_t	l2_state;

	l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
	l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);

	l2_state = (l2_tag & CH_ECSTATE_MASK);
	cmn_err(CE_CONT,
	    "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
	    PRTF_64_TO_32(l2_subaddr),
	    PRTF_64_TO_32(l2_tag),
	    tag_state_to_desc(l2_state));
}

void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;
	int i, offset;
	uint8_t	way, l2_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}	/* end of for way loop */
}

void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;
	int i, offset;
	uint8_t	way, ec_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr =
			    &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}

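/*
 * A retire/unretire routine must not itself occupy the cache index it is
 * about to modify.  Compare the index bits of the target tag address with
 * the physical placement of the routine's text (plus a 0x100-byte guard
 * band on either side); if they collide, the caller must use the
 * *_alternate copy of the routine instead.
 */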
static boolean_t
tag_addr_collides(uint64_t tag_addr, cache_id_t type,
    retire_func_t start_of_func, retire_func_t end_of_func)
{
	uint64_t start_paddr, end_paddr;
	char *type_str;

	start_paddr = va_to_pa((void *)start_of_func);
	end_paddr = va_to_pa((void *)end_of_func);
	switch (type) {
		case L2_CACHE_TAG:
		case L2_CACHE_DATA:
			tag_addr &= PN_L2_INDEX_MASK;
			start_paddr &= PN_L2_INDEX_MASK;
			end_paddr &= PN_L2_INDEX_MASK;
			type_str = "L2:";
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			tag_addr &= PN_L3_TAG_RD_MASK;
			start_paddr &= PN_L3_TAG_RD_MASK;
			end_paddr &= PN_L3_TAG_RD_MASK;
			type_str = "L3:";
			break;
		default:
			/*
			 * Should never reach here.
			 */
			ASSERT(0);
			return (B_FALSE);
	}
	if ((tag_addr > (start_paddr - 0x100)) &&
	    (tag_addr < (end_paddr + 0x100))) {
		if (mem_cache_debug & 0x1)
			cmn_err(CE_CONT,
			    "%s collision detected tag_addr = 0x%08x"
			    " start_paddr = 0x%08x end_paddr = 0x%08x\n",
			    type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
			    (uint32_t)end_paddr);
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

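/*
 * Build the diagnostic tag address for the requested cacheline: the index
 * selects the line, the way is folded in at the L2/L3 way shift, and the
 * PN_L2_IDX_HW_ECC_EN/PN_L3_IDX_HW_ECC_EN bit is set so the hardware
 * generates ECC for the access.
 */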
static uint64_t
get_tag_addr(cache_info_t *cache_info)
{
	uint64_t tag_addr, scratch;

	switch (cache_info->cache) {
		case L2_CACHE_TAG:
		case L2_CACHE_DATA:
			tag_addr = (uint64_t)(cache_info->index <<
			    PN_CACHE_LINE_SHIFT);
			scratch = (uint64_t)(cache_info->way <<
			    PN_L2_WAY_SHIFT);
			tag_addr |= scratch;
			tag_addr |= PN_L2_IDX_HW_ECC_EN;
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			tag_addr = (uint64_t)(cache_info->index <<
			    PN_CACHE_LINE_SHIFT);
			scratch = (uint64_t)(cache_info->way <<
			    PN_L3_WAY_SHIFT);
			tag_addr |= scratch;
			tag_addr |= PN_L3_IDX_HW_ECC_EN;
			break;
		default:
			/*
			 * Should never reach here.
			 */
			ASSERT(0);
			return (uint64_t)(0);
	}
	return (tag_addr);
}

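/*
 * Work-horse for the ioctls: validate the index/way against Panther cache
 * geometry, check that the target CPU is present and online, then set
 * thread affinity to that CPU and perform the retire, unretire or tag read
 * using the (possibly alternate) low-level routines and the TL1 tag
 * readers invoked via xt_one().
 */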
static int
mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
{
	int	ret_val = 0;
	uint64_t afar, tag_addr;
	ch_cpu_logout_t clop;
	uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
	int	i, retire_retry_count;
	cpu_t	*cpu;
	uint64_t tag_data;
	uint8_t state;

	switch (cache_info->cache) {
		case L2_CACHE_TAG:
		case L2_CACHE_DATA:
			if (cache_info->way >= PN_CACHE_NWAYS)
				return (EINVAL);
			if (cache_info->index >=
			    (PN_L2_SET_SIZE/PN_L2_LINESIZE))
				return (EINVAL);
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			if (cache_info->way >= PN_CACHE_NWAYS)
				return (EINVAL);
			if (cache_info->index >=
			    (PN_L3_SET_SIZE/PN_L3_LINESIZE))
				return (EINVAL);
			break;
		default:
			return (ENOTSUP);
	}
	/*
	 * Check that we have a valid CPU id and that the CPU is online.
	 */
	mutex_enter(&cpu_lock);
	cpu = cpu_get(cache_info->cpu_id);
	if ((cpu == NULL) || (!cpu_is_online(cpu))) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}
	mutex_exit(&cpu_lock);
	switch (cmd) {
		case MEM_CACHE_RETIRE:
			if ((cache_info->bit & MSB_BIT_MASK) ==
			    MSB_BIT_MASK) {
				pattern = ((uint64_t)1 <<
				    (cache_info->bit & TAG_BIT_MASK));
			} else {
				pattern = 0;
			}
			tag_addr = get_tag_addr(cache_info);
			pattern |= PN_ECSTATE_NA;
			retire_retry_count = 0;
			affinity_set(cache_info->cpu_id);
			switch (cache_info->cache) {
				case L2_CACHE_DATA:
				case L2_CACHE_TAG:
retry_l2_retire:
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    retire_l2_start, retire_l2_end))
						ret_val =
						    retire_l2_alternate(
						    tag_addr, pattern);
					else
						ret_val = retire_l2(tag_addr,
						    pattern);
					if (ret_val == 1) {
						/*
						 * The cacheline was already
						 * in the retired (NA) state,
						 * so return success.
						 */
						ret_val = 0;
					}
					if (ret_val < 0) {
						cmn_err(CE_WARN,
		"retire_l2() failed. index = 0x%x way %d. Retrying...\n",
						    cache_info->index,
						    cache_info->way);
						if (retire_retry_count >= 2) {
							retire_failures++;
							affinity_clear();
							return (EIO);
						}
						retire_retry_count++;
						goto retry_l2_retire;
					}
					if (ret_val == 2)
						l2_flush_retries_done++;
			/*
			 * We are bound to the target CPU (affinity_set()
			 * above) and send a cross trap to ourselves.  On
			 * return from xt_one() we can rely on the data in
			 * tag_data being filled in.  Normally one would do
			 * an xt_sync() to make sure that the CPU has
			 * completed the cross call sent via xt_one().
			 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						retire_failures++;
						print_l2_tag(tag_addr,
						    tag_data);
						cmn_err(CE_WARN,
		"L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
						    cache_info->index,
						    cache_info->way);
						if (retire_retry_count >= 2) {
							retire_failures++;
							affinity_clear();
							return (EIO);
						}
						retire_retry_count++;
						goto retry_l2_retire;
					}
					break;
				case L3_CACHE_TAG:
				case L3_CACHE_DATA:
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    retire_l3_start, retire_l3_end))
						ret_val =
						    retire_l3_alternate(
						    tag_addr, pattern);
					else
						ret_val = retire_l3(tag_addr,
						    pattern);
					if (ret_val == 1) {
						/*
						 * The cacheline was already
						 * in the retired (NA) state,
						 * so return success.
						 */
						ret_val = 0;
					}
					if (ret_val < 0) {
						cmn_err(CE_WARN,
			"retire_l3() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						retire_failures++;
						affinity_clear();
						return (EIO);
					}
			/*
			 * We are bound to the target CPU (affinity_set()
			 * above) and send a cross trap to ourselves.  On
			 * return from xt_one() we can rely on the data in
			 * tag_data being filled in.  Normally one would do
			 * an xt_sync() to make sure that the CPU has
			 * completed the cross call sent via xt_one().
			 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l3_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						cmn_err(CE_WARN,
					"L3 RETIRE failed for index 0x%x\n",
						    cache_info->index);
						retire_failures++;
						affinity_clear();
						return (EIO);
					}

					break;
			}
			affinity_clear();
			break;
		case MEM_CACHE_UNRETIRE:
			tag_addr = get_tag_addr(cache_info);
			pattern = PN_ECSTATE_INV;
			affinity_set(cache_info->cpu_id);
			switch (cache_info->cache) {
				case L2_CACHE_DATA:
				case L2_CACHE_TAG:
					/*
					 * Check if the index/way is in NA state
					 */
			/*
			 * We are bound to the target CPU (affinity_set()
			 * above) and send a cross trap to ourselves.  On
			 * return from xt_one() we can rely on the data in
			 * tag_data being filled in.  Normally one would do
			 * an xt_sync() to make sure that the CPU has
			 * completed the cross call sent via xt_one().
			 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						affinity_clear();
						return (EINVAL);
					}
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    unretire_l2_start, unretire_l2_end))
						ret_val =
						    unretire_l2_alternate(
						    tag_addr, pattern);
					else
						ret_val =
						    unretire_l2(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
			"unretire_l2() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						retire_failures++;
						affinity_clear();
						return (EIO);
					}
					break;
				case L3_CACHE_TAG:
				case L3_CACHE_DATA:
					/*
					 * Check if the index/way is in NA state
					 */
			/*
			 * We are bound to the target CPU (affinity_set()
			 * above) and send a cross trap to ourselves.  On
			 * return from xt_one() we can rely on the data in
			 * tag_data being filled in.  Normally one would do
			 * an xt_sync() to make sure that the CPU has
			 * completed the cross call sent via xt_one().
			 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l3_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						affinity_clear();
						return (EINVAL);
					}
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    unretire_l3_start, unretire_l3_end))
						ret_val =
						    unretire_l3_alternate(
						    tag_addr, pattern);
					else
						ret_val =
						    unretire_l3(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
			"unretire_l3() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						affinity_clear();
						return (EIO);
					}
					break;
			}
			affinity_clear();
			break;
		case MEM_CACHE_ISRETIRED:
		case MEM_CACHE_STATE:
			return (ENOTSUP);
		case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
			/*
			 * Read tag and data for all the ways at a given afar
			 */
			afar = (uint64_t)(cache_info->index
			    << PN_CACHE_LINE_SHIFT);
			affinity_set(cache_info->cpu_id);
			/*
			 * We are bound to the target CPU (affinity_set()
			 * above) and send a cross trap to ourselves.  On
			 * return from xt_one() we can rely on the data in
			 * clop being filled in.  Normally one would do an
			 * xt_sync() to make sure that the CPU has completed
			 * the cross call sent via xt_one().
			 */
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_ecache_dtags_tl1),
			    afar, (uint64_t)(&clop));
			switch (cache_info->cache) {
				case L2_CACHE_TAG:
					for (i = 0; i < PN_CACHE_NWAYS; i++) {
						Lxcache_tag_data[i] =
						    clop.clo_data.chd_l2_data
						    [i].ec_tag;
					}
					last_error_injected_bit =
					    last_l2tag_error_injected_bit;
					last_error_injected_way =
					    last_l2tag_error_injected_way;
					break;
				case L3_CACHE_TAG:
					for (i = 0; i < PN_CACHE_NWAYS; i++) {
						Lxcache_tag_data[i] =
						    clop.clo_data.chd_ec_data
						    [i].ec_tag;
					}
					last_error_injected_bit =
					    last_l3tag_error_injected_bit;
					last_error_injected_way =
					    last_l3tag_error_injected_way;
					break;
				default:
					affinity_clear();
					return (ENOTSUP);
			}	/* end of switch (cache) */
#ifdef DEBUG
			if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
				pattern = ((uint64_t)1 <<
				    last_error_injected_bit);
				/*
				 * If the injected bit is one of the ECC bits
				 * we need to make sure the ECC on all ways is
				 * corrupted.
				 */
				if ((last_error_injected_bit >= 6) &&
				    (last_error_injected_bit <= 14)) {
					for (i = 0; i < PN_CACHE_NWAYS; i++)
						Lxcache_tag_data[i] ^=
						    pattern;
				} else
					Lxcache_tag_data
					    [last_error_injected_way] ^=
					    pattern;
			}
#endif
			if (ddi_copyout((caddr_t)Lxcache_tag_data,
			    (caddr_t)cache_info->datap,
			    sizeof (Lxcache_tag_data), mode)
			    != DDI_SUCCESS) {
				affinity_clear();
				return (EFAULT);
			}
			affinity_clear();
			break;	/* end of READ_TAGS */
		default:
			return (ENOTSUP);
	}	/* end of switch (cmd) */
	return (ret_val);
}

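/*
 * ioctl(9E) entry point: copy in the cache_info argument (converting from
 * the ILP32 layout if needed), reject CPU ids that are out of range or not
 * Panther, require FWRITE for the state-changing commands, and hand off to
 * mem_cache_ioctl_ops() under the per-instance mutex.
 */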
/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
		int *rvalp)
{
	int	inst;
	struct mem_cache_softc *softc;
	cache_info_t	cache_info;
	cache_info32_t	cache_info32;
	int	ret_val;
	int	is_panther;

	inst = getminor(dev);
	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
			mutex_exit(&softc->mutex);
			return (EFAULT);
		}
		cache_info.cache = cache_info32.cache;
		cache_info.index = cache_info32.index;
		cache_info.way = cache_info32.way;
		cache_info.cpu_id = cache_info32.cpu_id;
		cache_info.bit = cache_info32.bit;
		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
	} else
#endif
	if (ddi_copyin((cache_info_t *)arg, &cache_info,
	    sizeof (cache_info), mode) != DDI_SUCCESS) {
		mutex_exit(&softc->mutex);
		return (EFAULT);
	}

	if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
		mutex_exit(&softc->mutex);
		return (EINVAL);
	}

	is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
	if (!is_panther) {
		mutex_exit(&softc->mutex);
		return (ENOTSUP);
	}
	switch (cmd) {
		case MEM_CACHE_RETIRE:
		case MEM_CACHE_UNRETIRE:
			if ((mode & FWRITE) == 0) {
				ret_val = EBADF;
				break;
			}
		/*FALLTHROUGH*/
		case MEM_CACHE_ISRETIRED:
		case MEM_CACHE_STATE:
		case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
			ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
			break;
		default:
			ret_val = ENOTSUP;
			break;
	}
	mutex_exit(&softc->mutex);
	return (ret_val);
}