/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Driver to retire/unretire L2/L3 cachelines on Panther (UltraSPARC-IV+)
 */
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>

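/*
 * Low-level retire/unretire and diagnostic-access primitives,
 * implemented outside this file.  The *_start/*_end pairs bracket the
 * text of each retire/unretire routine so that tag_addr_collides()
 * can detect when a target cacheline maps to the same index as the
 * routine that would manipulate it.
 */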
extern int	retire_l2(uint64_t, uint64_t);
extern int	retire_l2_alternate(uint64_t, uint64_t);
extern int	unretire_l2(uint64_t, uint64_t);
extern int	unretire_l2_alternate(uint64_t, uint64_t);
extern int	retire_l3(uint64_t, uint64_t);
extern int	retire_l3_alternate(uint64_t, uint64_t);
extern int	unretire_l3(uint64_t, uint64_t);
extern int	unretire_l3_alternate(uint64_t, uint64_t);

extern void	rw_physical_addr(uint64_t, uint64_t);
extern void	casxa_physical_addr(uint64_t, uint64_t);
extern void	read_from_physical_addr(uint64_t, uint64_t, uint64_t);

extern void	retire_l2_start(uint64_t, uint64_t);
extern void	retire_l2_end(uint64_t, uint64_t);
extern void	unretire_l2_start(uint64_t, uint64_t);
extern void	unretire_l2_end(uint64_t, uint64_t);
extern void	retire_l3_start(uint64_t, uint64_t);
extern void	retire_l3_end(uint64_t, uint64_t);
extern void	unretire_l3_start(uint64_t, uint64_t);
extern void	unretire_l3_end(uint64_t, uint64_t);

extern void	get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern uint64_t	get_l2_tag_tl1(uint64_t, uint64_t);
extern uint64_t	get_l3_tag_tl1(uint64_t, uint64_t);

/* Macro for putting a 64-bit value onto the stack as two 32-bit ints */
#define	PRTF_64_TO_32(x)	(uint32_t)((x)>>32), (uint32_t)(x)

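/*
 * Statistics and debug state.  The last_*_error_injected_{way,bit}
 * variables record the most recent injected tag error per cache level
 * so that MEM_CACHE_READ_ERROR_INJECTED_TAGS can fold the injected
 * bit back into the tag data it copies out.
 */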
uint_t l2_flush_retries_done = 0;
int mem_cache_debug = 0x0;
uint64_t pattern = 0;
uint32_t retire_failures = 0;
uint32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
uint32_t last_l3tag_error_injected_way = 0;
uint8_t last_l3tag_error_injected_bit = 0;
uint32_t last_l2tag_error_injected_way = 0;
uint8_t last_l2tag_error_injected_bit = 0;
uint32_t last_l3data_error_injected_way = 0;
uint8_t last_l3data_error_injected_bit = 0;
uint32_t last_l2data_error_injected_way = 0;
uint8_t last_l2data_error_injected_bit = 0;

/* dev_ops and cb_ops entry point function declarations */
static int	mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
static int	mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
static int	mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
			void **);
static int	mem_cache_open(dev_t *, int, int, cred_t *);
static int	mem_cache_close(dev_t, int, int, cred_t *);
static int	mem_cache_ioctl_ops(int, int, cache_info_t *);
static int	mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,
	mem_cache_close,
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mem_cache_ioctl,
	nodev,			/* devmap */
	nodev,			/* mmap */
	ddi_segmap,		/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};

static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* device reference count */
	mem_cache_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	mem_cache_attach,
	mem_cache_detach,
	nulldev,		/* reset */
	&mem_cache_cb_ops,
	(struct bus_ops *)NULL,
	nulldev			/* power */
};

/*
 * Soft state
 */
struct mem_cache_softc {
	dev_info_t	*dip;
	kmutex_t	mutex;
};
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))

/* module configuration stuff */
static void *statep;
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	"mem_cache_driver (08/01/30) ",
	&mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};

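/*
 * Loadable module entry points.
 */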
int
_init(void)
{
	int e;

	if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
	    MAX_MEM_CACHE_INSTANCES)) {
		return (e);
	}

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&statep);

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&statep);

	return (DDI_SUCCESS);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int	inst;
	int	retval = DDI_SUCCESS;
	struct mem_cache_softc *softc;

	inst = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((softc = getsoftc(inst)) == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else
			*result = (void *)softc->dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)((uintptr_t)inst);
		break;

	default:
		retval = DDI_FAILURE;
	}

	return (retval);
}

static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc = NULL;
	char name[80];

	switch (cmd) {
	case DDI_ATTACH:
		inst = ddi_get_instance(dip);
		if (inst >= MAX_MEM_CACHE_INSTANCES) {
			cmn_err(CE_WARN, "attach failed, too many instances\n");
			return (DDI_FAILURE);
		}
		(void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
		if (ddi_create_priv_minor_node(dip, name,
		    S_IFCHR,
		    inst,
		    DDI_PSEUDO,
		    0, NULL, "all", 0640) ==
		    DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		/* Allocate a soft state structure for this instance */
		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
			cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
			    "for inst %d\n", inst);
			goto attach_failed;
		}

		/* Setup soft state */
		softc = getsoftc(inst);
		softc->dip = dip;
		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

		/* Create main environmental node */
		ddi_report_dev(dip);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:

	/* Free the soft state if allocated, and remove the minor node */
	if (softc)
		ddi_soft_state_free(statep, inst);

	ddi_remove_minor_node(dip, NULL);

	return (DDI_FAILURE);
}

static int
mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc;

	switch (cmd) {
	case DDI_DETACH:
		inst = ddi_get_instance(dip);
		if ((softc = getsoftc(inst)) == NULL)
			return (ENXIO);

		/* Free the soft state and remove minor node added earlier */
		mutex_destroy(&softc->mutex);
		ddi_soft_state_free(statep, inst);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*ARGSUSED*/
static int
mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int	inst = getminor(*devp);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

/*ARGSUSED*/
static int
mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int	inst = getminor(dev);

	return (getsoftc(inst) == NULL ? ENXIO : 0);
}

static char *tstate_to_desc[] = {
	"Invalid",			/* 0 */
	"Shared",			/* 1 */
	"Exclusive",			/* 2 */
	"Owner",			/* 3 */
	"Modified",			/* 4 */
	"NA",				/* 5 */
	"Owner/Shared",			/* 6 */
	"Reserved(7)",			/* 7 */
};

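/*
 * Map the low-order state bits of an E$ tag to a printable name.
 */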
static char *
tag_state_to_desc(uint8_t tagstate)
{
	return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}

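/*
 * Print the physical address, raw tag and decoded state of a single
 * L2 tag.
 */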
void
print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
{
	uint64_t l2_subaddr;
	uint8_t	l2_state;

	l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
	l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);

	l2_state = (l2_tag & CH_ECSTATE_MASK);
	cmn_err(CE_CONT,
	    "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
	    PRTF_64_TO_32(l2_subaddr),
	    PRTF_64_TO_32(l2_tag),
	    tag_state_to_desc(l2_state));
}

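/*
 * Dump the tag and captured subblock data of every L2 way recorded
 * in the logout area.
 */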
void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;
	int i, offset;
	uint8_t	way, l2_state;
	ch_ec_data_t *ecp;

	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}	/* end of for way loop */
}

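/*
 * Same as print_l2cache_line(), but for the L3 (external) cache.
 */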
void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;
	int i, offset;
	uint8_t	way, ec_state;
	ch_ec_data_t *ecp;

	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr =
			    &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}

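/*
 * Determine whether the target cacheline shares a cache index with
 * the text of the retire/unretire routine bracketed by start_of_func
 * and end_of_func, allowing a 0x100-byte guard on either side.
 * Callers use this to switch to the *_alternate copy of the routine,
 * so the code doing the retire never operates on its own cacheline.
 */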
static boolean_t
tag_addr_collides(uint64_t tag_addr, cache_id_t type,
    retire_func_t start_of_func, retire_func_t end_of_func)
{
	uint64_t start_paddr, end_paddr;
	char *type_str;

	start_paddr = va_to_pa((void *)start_of_func);
	end_paddr = va_to_pa((void *)end_of_func);
	switch (type) {
		case L2_CACHE_TAG:
		case L2_CACHE_DATA:
			tag_addr &= PN_L2_INDEX_MASK;
			start_paddr &= PN_L2_INDEX_MASK;
			end_paddr &= PN_L2_INDEX_MASK;
			type_str = "L2:";
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			tag_addr &= PN_L3_TAG_RD_MASK;
			start_paddr &= PN_L3_TAG_RD_MASK;
			end_paddr &= PN_L3_TAG_RD_MASK;
			type_str = "L3:";
			break;
		default:
			/*
			 * Should never reach here.
			 */
			ASSERT(0);
			return (B_FALSE);
	}
	if ((tag_addr > (start_paddr - 0x100)) &&
	    (tag_addr < (end_paddr + 0x100))) {
		if (mem_cache_debug & 0x1)
			cmn_err(CE_CONT,
			    "%s collision detected tag_addr = 0x%08x"
			    " start_paddr = 0x%08x end_paddr = 0x%08x\n",
			    type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
			    (uint32_t)end_paddr);
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

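/*
 * Construct the diagnostic tag address for the requested index/way,
 * including the hardware ECC enable bit that the retire/unretire
 * routines expect.
 */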
static uint64_t
get_tag_addr(cache_info_t *cache_info)
{
	uint64_t tag_addr, scratch;

	switch (cache_info->cache) {
		case L2_CACHE_TAG:
		case L2_CACHE_DATA:
			tag_addr = (uint64_t)(cache_info->index <<
			    PN_CACHE_LINE_SHIFT);
			scratch = (uint64_t)(cache_info->way <<
			    PN_L2_WAY_SHIFT);
			tag_addr |= scratch;
			tag_addr |= PN_L2_IDX_HW_ECC_EN;
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			tag_addr = (uint64_t)(cache_info->index <<
			    PN_CACHE_LINE_SHIFT);
			scratch = (uint64_t)(cache_info->way <<
			    PN_L3_WAY_SHIFT);
			tag_addr |= scratch;
			tag_addr |= PN_L3_IDX_HW_ECC_EN;
			break;
		default:
			/*
			 * Should never reach here.
			 */
			ASSERT(0);
			return ((uint64_t)0);
	}
	return (tag_addr);
}

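/*
 * Workhorse for the ioctls: validates the requested index/way, binds
 * the calling thread to the target CPU with affinity_set(), and uses
 * xt_one() cross-traps into the tl1 routines to read tags on that
 * CPU at trap level 1.
 */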
static int
mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
{
	int	ret_val = 0;
	uint64_t afar, tag_addr;
	ch_cpu_logout_t clop;
	uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
	int	i, retire_retry_count;
	cpu_t	*cpu;
	uint64_t tag_data;
	uint8_t state;
	uint64_t start_paddr;
	uint64_t cache_set_size;
	uint_t	iteration_count = 0x100000;

	switch (cache_info->cache) {
		case L2_CACHE_TAG:
		case L2_CACHE_DATA:
			if (cache_info->way >= PN_CACHE_NWAYS)
				return (EINVAL);
			if (cache_info->index >=
			    (PN_L2_SET_SIZE/PN_L2_LINESIZE))
				return (EINVAL);
			cache_set_size = PN_L2_SET_SIZE;
			break;
		case L3_CACHE_TAG:
		case L3_CACHE_DATA:
			if (cache_info->way >= PN_CACHE_NWAYS)
				return (EINVAL);
			if (cache_info->index >=
			    (PN_L3_SET_SIZE/PN_L3_LINESIZE))
				return (EINVAL);
			cache_set_size = PN_L3_SET_SIZE;
			break;
		default:
			return (ENOTSUP);
	}
	/*
	 * Check that we have a valid cpu ID and that the
	 * CPU is ONLINE.
	 */
	mutex_enter(&cpu_lock);
	cpu = cpu_get(cache_info->cpu_id);
	if ((cpu == NULL) || (!cpu_is_online(cpu))) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}
	mutex_exit(&cpu_lock);
	switch (cmd) {
		case MEM_CACHE_RETIRE:
			if ((cache_info->bit & MSB_BIT_MASK) ==
			    MSB_BIT_MASK) {
				pattern = ((uint64_t)1 <<
				    (cache_info->bit & TAG_BIT_MASK));
			} else {
				pattern = 0;
			}
			tag_addr = get_tag_addr(cache_info);
			pattern |= PN_ECSTATE_NA;
			retire_retry_count = 0;
			affinity_set(cache_info->cpu_id);
			switch (cache_info->cache) {
				case L2_CACHE_DATA:
				case L2_CACHE_TAG:
retry_l2_retire:
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    retire_l2_start, retire_l2_end))
						ret_val =
						    retire_l2_alternate(
						    tag_addr, pattern);
					else
						ret_val = retire_l2(tag_addr,
						    pattern);
					if (ret_val == 1) {
						/*
						 * The cacheline was already
						 * in the retired state, so
						 * return success.
						 */
						ret_val = 0;
					}
					if (ret_val < 0) {
						cmn_err(CE_WARN,
		"retire_l2() failed. index = 0x%x way %d. Retrying...\n",
						    cache_info->index,
						    cache_info->way);
						if (retire_retry_count >= 2) {
							retire_failures++;
							affinity_clear();
							return (EIO);
						}
						retire_retry_count++;
						goto retry_l2_retire;
					}
					if (ret_val == 2)
						l2_flush_retries_done++;
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						retire_failures++;
						print_l2_tag(tag_addr,
						    tag_data);
						cmn_err(CE_WARN,
		"L2 RETIRE: failed for index 0x%x way %d. Retrying...\n",
						    cache_info->index,
						    cache_info->way);
						if (retire_retry_count >= 2) {
							retire_failures++;
							affinity_clear();
							return (EIO);
						}
						retire_retry_count++;
						goto retry_l2_retire;
					}
					break;
				case L3_CACHE_TAG:
				case L3_CACHE_DATA:
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    retire_l3_start, retire_l3_end))
						ret_val =
						    retire_l3_alternate(
						    tag_addr, pattern);
					else
						ret_val = retire_l3(tag_addr,
						    pattern);
					if (ret_val == 1) {
						/*
						 * The cacheline was already
						 * in the retired state, so
						 * return success.
						 */
						ret_val = 0;
					}
					if (ret_val < 0) {
						cmn_err(CE_WARN,
			"retire_l3() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						retire_failures++;
						affinity_clear();
						return (EIO);
					}
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l3_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						cmn_err(CE_WARN,
					"L3 RETIRE failed for index 0x%x\n",
						    cache_info->index);
						retire_failures++;
						affinity_clear();
						return (EIO);
					}

					break;
			}
			affinity_clear();
			break;
		case MEM_CACHE_UNRETIRE:
			tag_addr = get_tag_addr(cache_info);
			pattern = PN_ECSTATE_INV;
			affinity_set(cache_info->cpu_id);
			switch (cache_info->cache) {
				case L2_CACHE_DATA:
				case L2_CACHE_TAG:
					/*
					 * Check if the index/way is in NA state
					 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						affinity_clear();
						return (EINVAL);
					}
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    unretire_l2_start, unretire_l2_end))
						ret_val =
						    unretire_l2_alternate(
						    tag_addr, pattern);
					else
						ret_val =
						    unretire_l2(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
			"unretire_l2() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						retire_failures++;
						affinity_clear();
						return (EIO);
					}
					break;
				case L3_CACHE_TAG:
				case L3_CACHE_DATA:
					/*
					 * Check if the index/way is in NA state
					 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l3_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						affinity_clear();
						return (EINVAL);
					}
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    unretire_l3_start, unretire_l3_end))
						ret_val =
						    unretire_l3_alternate(
						    tag_addr, pattern);
					else
						ret_val =
						    unretire_l3(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
			"unretire_l3() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						affinity_clear();
						return (EIO);
					}
					break;
			}
			affinity_clear();
			break;
		case MEM_CACHE_ISRETIRED:
		case MEM_CACHE_STATE:
			return (ENOTSUP);
		case MEM_CACHE_READ_TAGS:
		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
			/*
			 * Read tag and data for all the ways at a given afar
			 */
			afar = (uint64_t)(cache_info->index
			    << PN_CACHE_LINE_SHIFT);
			affinity_set(cache_info->cpu_id);
			xt_one(cache_info->cpu_id,
			    (xcfunc_t *)(get_ecache_dtags_tl1),
			    afar, (uint64_t)(&clop));
			switch (cache_info->cache) {
				case L2_CACHE_TAG:
					for (i = 0; i < PN_CACHE_NWAYS; i++) {
						Lxcache_tag_data[i] =
						    clop.clo_data.chd_l2_data
						    [i].ec_tag;
					}
					last_error_injected_bit =
					    last_l2tag_error_injected_bit;
					last_error_injected_way =
					    last_l2tag_error_injected_way;
					break;
				case L3_CACHE_TAG:
					for (i = 0; i < PN_CACHE_NWAYS; i++) {
						Lxcache_tag_data[i] =
						    clop.clo_data.chd_ec_data
						    [i].ec_tag;
					}
					last_error_injected_bit =
					    last_l3tag_error_injected_bit;
					last_error_injected_way =
					    last_l3tag_error_injected_way;
					break;
				default:
					affinity_clear();
					return (ENOTSUP);
			}	/* end of switch (cache) */
			if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
				pattern = ((uint64_t)1 <<
				    last_error_injected_bit);
				/*
				 * If the error bit is in the ECC field we
				 * need to make sure the ECC on all ways is
				 * corrupted.
				 */
				if ((last_error_injected_bit >= 6) &&
				    (last_error_injected_bit <= 14)) {
					for (i = 0; i < PN_CACHE_NWAYS; i++)
						Lxcache_tag_data[i] ^=
						    pattern;
				} else
					Lxcache_tag_data
					    [last_error_injected_way] ^=
					    pattern;
			}
			if (ddi_copyout((caddr_t)Lxcache_tag_data,
			    (caddr_t)cache_info->datap,
			    sizeof (Lxcache_tag_data), mode)
			    != DDI_SUCCESS) {
				affinity_clear();
				return (EFAULT);
			}
			affinity_clear();
			break;	/* end of READ_TAGS */
		case MEM_CACHE_RETIRE_AND_UNRETIRE_RW:
			affinity_set(cache_info->cpu_id);
			tag_addr = get_tag_addr(cache_info);
			do {
				pattern = 0;
				pattern |= PN_ECSTATE_NA;
				switch (cache_info->cache) {
				case L2_CACHE_DATA:
				case L2_CACHE_TAG:
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    retire_l2_start, retire_l2_end))
						ret_val =
						    retire_l2_alternate(
						    tag_addr, pattern);
					else
						ret_val = retire_l2(tag_addr,
						    pattern);
					if (ret_val == 2)
						l2_flush_retries_done++;
					if (ret_val < 0) {
						cmn_err(CE_WARN,
		"retire_l2() failed. ret_val = %d index = 0x%x way %d\n",
						    ret_val,
						    cache_info->index,
						    cache_info->way);
						affinity_clear();
						return (EIO);
					}
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						cmn_err(CE_WARN,
				"L2 RETIRE: failed for index 0x%x way %d\n",
						    cache_info->index,
						    cache_info->way);
						affinity_clear();
						return (EIO);
					}
					break;
				case L3_CACHE_TAG:
				case L3_CACHE_DATA:
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    retire_l3_start, retire_l3_end))
						ret_val =
						    retire_l3_alternate(
						    tag_addr, pattern);
					else
						ret_val = retire_l3(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
		"retire_l3() failed. ret_val = %d index = 0x%x way %d\n",
						    ret_val,
						    cache_info->index,
						    cache_info->way);
						affinity_clear();
						return (EIO);
					}
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l3_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						cmn_err(CE_WARN,
				"L3 RETIRE failed for index 0x%x way %d\n",
						    cache_info->index,
						    cache_info->way);
						affinity_clear();
						return (EIO);
					}
					break;
				}	/* end of switch */
				/*
				 * Now unretire the way.
				 */
				pattern = PN_ECSTATE_INV;
				switch (cache_info->cache) {
				case L2_CACHE_DATA:
				case L2_CACHE_TAG:
					/*
					 * Check if the way is in NA state
					 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						affinity_clear();
						return (EINVAL);
					}
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    unretire_l2_start, unretire_l2_end))
						ret_val =
						    unretire_l2_alternate(
						    tag_addr, pattern);
					else
						ret_val =
						    unretire_l2(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
			"unretire_l2() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						affinity_clear();
						return (EIO);
					}
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l2_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state == PN_ECSTATE_NA) {
						cmn_err(CE_WARN,
		"L2 UNRETIRE failed for index 0x%x way %d\n",
						    cache_info->index,
						    cache_info->way);
						affinity_clear();
						return (EIO);
					}
					break;
				case L3_CACHE_TAG:
				case L3_CACHE_DATA:
					/*
					 * Check if the way is in NA state
					 */
					xt_one(cache_info->cpu_id,
					    (xcfunc_t *)(get_l3_tag_tl1),
					    tag_addr, (uint64_t)(&tag_data));
					state = tag_data & CH_ECSTATE_MASK;
					if (state != PN_ECSTATE_NA) {
						affinity_clear();
						return (EINVAL);
					}
					if (tag_addr_collides(tag_addr,
					    cache_info->cache,
					    unretire_l3_start, unretire_l3_end))
						ret_val =
						    unretire_l3_alternate(
						    tag_addr, pattern);
					else
						ret_val =
						    unretire_l3(tag_addr,
						    pattern);
					if (ret_val != 0) {
						cmn_err(CE_WARN,
			"unretire_l3() failed. ret_val = %d index = 0x%x\n",
						    ret_val,
						    cache_info->index);
						affinity_clear();
						return (EIO);
					}
					break;
				}
			} while (iteration_count--);
			affinity_clear();
			break;
		case MEM_CACHE_RW_COLLISION_CODE:
			/*
			 * Find the lowest physical addr of kernel text
			 * that aligns to the first L2/L3 cacheline.
			 */
			tag_addr = get_tag_addr(cache_info);
			start_paddr = va_to_pa((void *)s_text);
			start_paddr += (cache_set_size - 1);
			start_paddr &= ~(cache_set_size - 1);
			tag_addr &= (cache_set_size - 1);
			start_paddr += tag_addr;
			casxa_physical_addr(start_paddr, 0x1000000);
			break;
		default:
			return (ENOTSUP);
	}	/* end of switch (cmd) */
	return (ret_val);
}

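/*
 * ioctl(9E) entry point.  Copies in the cache_info_t argument
 * (converting from an ILP32 caller's layout if necessary), requires
 * FWRITE for the destructive commands, and hands off to
 * mem_cache_ioctl_ops().
 */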
/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
		int *rvalp)
{
	int	inst;
	struct mem_cache_softc *softc;
	cache_info_t	cache_info;
	cache_info32_t	cache_info32;
	int	ret_val;

	inst = getminor(dev);
	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
			mutex_exit(&softc->mutex);
			return (EFAULT);
		}
		cache_info.cache = cache_info32.cache;
		cache_info.index = cache_info32.index;
		cache_info.way = cache_info32.way;
		cache_info.cpu_id = cache_info32.cpu_id;
		cache_info.bit = cache_info32.bit;
		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
	} else
#endif
	if (ddi_copyin((cache_info_t *)arg, &cache_info,
	    sizeof (cache_info), mode) != DDI_SUCCESS) {
		mutex_exit(&softc->mutex);
		return (EFAULT);
	}
	switch (cmd) {
		case MEM_CACHE_RETIRE:
		case MEM_CACHE_UNRETIRE:
		case MEM_CACHE_RETIRE_AND_UNRETIRE_RW:
			if ((mode & FWRITE) == 0) {
				ret_val = EBADF;
				break;
			}
		/*FALLTHROUGH*/
		case MEM_CACHE_ISRETIRED:
		case MEM_CACHE_STATE:
		case MEM_CACHE_READ_TAGS:
		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
		case MEM_CACHE_RW_COLLISION_CODE:
			ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
			break;
		default:
			ret_val = ENOTSUP;
			break;
	}
	mutex_exit(&softc->mutex);
	return (ret_val);
}