xref: /titanic_50/usr/src/uts/i86pc/io/immu_qinv.c (revision 1e49577a7fcde812700ded04431b49d67cc57d6d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright (c) 2010, Oracle and/or its affiliates.
23  * All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 #include <sys/ddi.h>
32 #include <sys/archsystm.h>
33 #include <vm/hat_i86.h>
34 #include <sys/types.h>
35 #include <sys/sysmacros.h>
36 #include <sys/immu.h>
37 
38 /* invalidation queue table entry size */
39 #define	QINV_ENTRY_SIZE		0x10
40 
41 /* max value of Queue Size field of Invalidation Queue Address Register */
42 #define	QINV_MAX_QUEUE_SIZE	0x7
43 
44 /* status data size of invalidation wait descriptor */
45 #define	QINV_SYNC_DATA_SIZE	0x4
46 
47 /* status data value of invalidation wait descriptor */
48 #define	QINV_SYNC_DATA_FENCE	1
49 #define	QINV_SYNC_DATA_UNFENCE	2
50 
51 /* invalidation queue head and tail */
52 #define	QINV_IQA_HEAD(QH)	BITX((QH), 18, 4)
53 #define	QINV_IQA_TAIL_SHIFT	4
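
/*
 * Worked example (illustrative): the Invalidation Queue Head and Tail
 * registers hold the byte offset of the current entry, so the entry
 * index sits in bits 18:4 (each entry is QINV_ENTRY_SIZE == 16 bytes).
 * A head register value of 0x30 therefore means hardware has fetched
 * up to entry index 3, and a tail index of 3 is written back as
 * (3 << QINV_IQA_TAIL_SHIFT) == 0x30.
 */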
54 
55 /* invalidation queue entry structure */
56 typedef struct qinv_inv_dsc {
57 	uint64_t	lo;
58 	uint64_t	hi;
59 } qinv_dsc_t;
60 
61 /*
62  * struct iotlb_pend_node
63  *   pending data for an iotlb flush
64  */
65 typedef struct iotlb_pend_node {
66 	dvcookie_t	*icn_dvcookies;  /* ptr to dvma cookie array */
67 	uint_t		icn_count;  /* valid cookie count */
68 	uint_t		icn_array_size;  /* array size */
69 	list_node_t	node;
70 } qinv_iotlb_pend_node_t;
71 
72 /*
73  * struct iotlb_pend_head
74  *   list head for pending iotlb flush nodes
75  */
76 typedef struct iotlb_pend_head {
77 	/* the pending node cache list */
78 	kmutex_t	ich_mem_lock;
79 	list_t		ich_mem_list;
80 } qinv_iotlb_pend_head_t;
81 
82 /*
83  * qinv_iotlb_t
84  *   pending data for queued invalidation iotlb flush
85  */
86 typedef struct qinv_iotlb {
87 	dvcookie_t	*qinv_iotlb_dvcookies;
88 	uint_t		qinv_iotlb_count;
89 	uint_t		qinv_iotlb_size;
90 	list_node_t	qinv_iotlb_node;
91 } qinv_iotlb_t;
92 
93 /* physically contiguous pages for the invalidation queue */
94 typedef struct qinv_mem {
95 	kmutex_t	   qinv_mem_lock;
96 	ddi_dma_handle_t   qinv_mem_dma_hdl;
97 	ddi_acc_handle_t   qinv_mem_acc_hdl;
98 	caddr_t		   qinv_mem_vaddr;
99 	paddr_t		   qinv_mem_paddr;
100 	uint_t		   qinv_mem_size;
101 	uint16_t	   qinv_mem_head;
102 	uint16_t	   qinv_mem_tail;
103 } qinv_mem_t;
104 
105 
106 /*
107  * invalidation queue state
108  *   This structure describes the state of the invalidation queue table
109  *   and the related status memory used by invalidation wait descriptors.
110  *
111  * qinv_table		- invalidation queue table
112  * qinv_sync		- sync status memory for invalidation wait descriptors
113  * qinv_pend_head	- cache list of pending iotlb flush nodes
114  * qinv_iotlb_pend_node - pending iotlb node array, indexed by sync slot
115  */
116 typedef struct qinv {
117 	qinv_mem_t		qinv_table;
118 	qinv_mem_t		qinv_sync;
119 	qinv_iotlb_pend_head_t qinv_pend_head;
120 	qinv_iotlb_pend_node_t  **qinv_iotlb_pend_node;
121 } qinv_t;
122 
123 static struct immu_flushops immu_qinv_flushops = {
124 	immu_qinv_context_fsi,
125 	immu_qinv_context_dsi,
126 	immu_qinv_context_gbl,
127 	immu_qinv_iotlb_psi,
128 	immu_qinv_iotlb_dsi,
129 	immu_qinv_iotlb_gbl
130 };
131 
132 /* helper macros for building queued invalidation descriptors */
133 #define	INV_DSC_TYPE(dsc)	((dsc)->lo & 0xF)
134 #define	CC_INV_DSC_HIGH		(0)
135 #define	CC_INV_DSC_LOW(fm, sid, did, g)	(((uint64_t)(fm) << 48) | \
136 	((uint64_t)(sid) << 32) | \
137 	((uint64_t)(did) << 16) | \
138 	((uint64_t)(g) << 4) | \
139 	1)
140 
141 #define	IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
142 	((uint64_t)(ih) << 6) |	\
143 	((uint64_t)(am)))
144 
145 #define	IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
146 	((uint64_t)(dr) << 7) | \
147 	((uint64_t)(dw) << 6) | \
148 	((uint64_t)(g) << 4) | \
149 	2)
150 
151 #define	DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))
152 
153 #define	DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
154 	((uint64_t)(sid) << 32) | \
155 	((uint64_t)(max_invs_pd) << 16) | \
156 	3)
157 
158 #define	IEC_INV_DSC_HIGH (0)
159 #define	IEC_INV_DSC_LOW(idx, im, g) (((uint64_t)(idx) << 32) | \
160 	((uint64_t)(im) << 27) | \
161 	((uint64_t)(g) << 4) | \
162 	4)
163 
164 #define	INV_WAIT_DSC_HIGH(saddr) ((uint64_t)(saddr))
165 
166 #define	INV_WAIT_DSC_LOW(sdata, fn, sw, iflag) (((uint64_t)(sdata) << 32) | \
167 	((uint64_t)(fn) << 6) | \
168 	((uint64_t)(sw) << 5) | \
169 	((uint64_t)(iflag) << 4) | \
170 	5)
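
/*
 * Descriptor layout summary, derived from the build macros above (the
 * Intel VT-d specification is the authoritative reference).  Each
 * descriptor is two 64-bit words, and bits 3:0 of the low word encode
 * the descriptor type:
 *
 *   1  context cache    low:  fm<<48 | sid<<32 | did<<16 | g<<4
 *   2  iotlb            low:  did<<16 | dr<<7 | dw<<6 | g<<4
 *                       high: addr | ih<<6 | am
 *   3  device iotlb     low:  sid<<32 | max_invs_pd<<16
 *                       high: addr | s
 *   4  iec              low:  idx<<32 | im<<27 | g<<4
 *   5  inv wait         low:  sdata<<32 | fn<<6 | sw<<5 | if<<4
 *                       high: status data physical address
 */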
171 
172 /*
173  * QS field of the Invalidation Queue Address Register:
174  * the invalidation queue holds 1 << (qinv_iqa_qs + 8) entries
175  */
176 static uint_t qinv_iqa_qs = 6;
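
/*
 * For example, with the default qinv_iqa_qs of 6 the queue holds
 * 1 << (6 + 8) == 16384 descriptors, i.e. 16384 * QINV_ENTRY_SIZE ==
 * 256KB of physically contiguous queue memory; the architectural
 * maximum (QS == QINV_MAX_QUEUE_SIZE == 7) is 32768 descriptors.
 */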
177 
178 /*
179  * invalidation descriptor type names for the queued invalidation interface
180  */
181 static char *qinv_dsc_type[] = {
182 	"Reserved",
183 	"Context Cache Invalidate Descriptor",
184 	"IOTLB Invalidate Descriptor",
185 	"Device-IOTLB Invalidate Descriptor",
186 	"Interrupt Entry Cache Invalidate Descriptor",
187 	"Invalidation Wait Descriptor",
188 	"Incorrect queue invalidation type"
189 };
190 
191 #define	QINV_MAX_DSC_TYPE	(sizeof (qinv_dsc_type) / sizeof (char *))
192 
193 /*
194  * the queued invalidation interface functions
195  */
196 static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
197 static void qinv_context_common(immu_t *immu, uint8_t function_mask,
198     uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
199 static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
200     uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
201 static void qinv_iec_common(immu_t *immu, uint_t iidx,
202     uint_t im, uint_t g);
203 static uint_t qinv_alloc_sync_mem_entry(immu_t *immu);
204 static void qinv_wait_async_unfence(immu_t *immu,
205     qinv_iotlb_pend_node_t *node);
206 static void qinv_wait_sync(immu_t *immu);
207 static int qinv_wait_async_finish(immu_t *immu, int *count);
208 /*LINTED*/
209 static void qinv_wait_async_fence(immu_t *immu);
210 /*LINTED*/
211 static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
212     uint64_t addr, uint_t size, uint_t max_invs_pd);
213 
214 
215 /* submit invalidation request descriptor to invalidation queue */
216 static void
217 qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
218 {
219 	qinv_t *qinv;
220 	qinv_mem_t *qinv_table;
221 	uint_t tail;
222 
223 	qinv = (qinv_t *)immu->immu_qinv;
224 	qinv_table = &(qinv->qinv_table);
225 
226 	mutex_enter(&qinv_table->qinv_mem_lock);
227 	tail = qinv_table->qinv_mem_tail;
228 	qinv_table->qinv_mem_tail++;
229 
230 	if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
231 		qinv_table->qinv_mem_tail = 0;
232 
233 	while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
234 		/*
235 		 * the invalidation queue is full; wait for hardware
236 		 * to fetch the next descriptor
237 		 */
238 		qinv_table->qinv_mem_head = QINV_IQA_HEAD(
239 		    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
240 	}
241 
242 	bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
243 	    QINV_ENTRY_SIZE);
244 
245 	immu_regs_put64(immu, IMMU_REG_INVAL_QT,
246 	    qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);
247 
248 	mutex_exit(&qinv_table->qinv_mem_lock);
249 }
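
/*
 * Note on the full-queue check above: head and tail index a circular
 * buffer, and the queue is treated as full when advancing the tail
 * would make it equal to the head, so at most qinv_mem_size - 1
 * descriptors can be outstanding.  With the default 16384-entry queue,
 * for example, the submitter spins re-reading the hardware head
 * register once 16383 descriptors are pending.
 */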
250 
251 /* queued invalidation interface -- invalidate context cache */
252 static void
253 qinv_context_common(immu_t *immu, uint8_t function_mask,
254     uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
255 {
256 	qinv_dsc_t dsc;
257 
258 	dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
259 	dsc.hi = CC_INV_DSC_HIGH;
260 
261 	qinv_submit_inv_dsc(immu, &dsc);
262 }
263 
264 /* queued invalidation interface -- invalidate iotlb */
265 static void
266 qinv_iotlb_common(immu_t *immu, uint_t domain_id,
267     uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
268 {
269 	qinv_dsc_t dsc;
270 	uint8_t dr = 0;
271 	uint8_t dw = 0;
272 
273 	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
274 		dr = 1;
275 	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
276 		dw = 1;
277 
278 	switch (type) {
279 	case TLB_INV_G_PAGE:
280 		if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
281 		    am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
282 		    addr & IMMU_PAGEOFFSET) {
283 			type = TLB_INV_G_DOMAIN;
284 			goto qinv_ignore_psi;
285 		}
286 		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
287 		dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
288 		break;
289 
290 	qinv_ignore_psi:
291 	case TLB_INV_G_DOMAIN:
292 		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
293 		dsc.hi = 0;
294 		break;
295 
296 	case TLB_INV_G_GLOBAL:
297 		dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
298 		dsc.hi = 0;
299 		break;
300 	default:
301 		ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
302 		return;
303 	}
304 
305 	qinv_submit_inv_dsc(immu, &dsc);
306 }
307 
308 /* queued invalidation interface -- invalidate dev_iotlb */
309 static void
310 qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
311     uint64_t addr, uint_t size, uint_t max_invs_pd)
312 {
313 	qinv_dsc_t dsc;
314 
315 	dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
316 	dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);
317 
318 	qinv_submit_inv_dsc(immu, &dsc);
319 }
320 
321 /* queued invalidation interface -- invalidate interrupt entry cache */
322 static void
323 qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
324 {
325 	qinv_dsc_t dsc;
326 
327 	dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
328 	dsc.hi = IEC_INV_DSC_HIGH;
329 
330 	qinv_submit_inv_dsc(immu, &dsc);
331 }
332 
333 /*
334  * allocate a free entry from the sync status memory
335  */
336 static uint_t
337 qinv_alloc_sync_mem_entry(immu_t *immu)
338 {
339 	qinv_mem_t *sync_mem;
340 	uint_t tail;
341 	qinv_t *qinv;
342 
343 	qinv = (qinv_t *)immu->immu_qinv;
344 	sync_mem = &qinv->qinv_sync;
345 
346 sync_mem_exhausted:
347 	mutex_enter(&sync_mem->qinv_mem_lock);
348 	tail = sync_mem->qinv_mem_tail;
349 	sync_mem->qinv_mem_tail++;
350 	if (sync_mem->qinv_mem_tail == sync_mem->qinv_mem_size)
351 		sync_mem->qinv_mem_tail = 0;
352 
353 	if (sync_mem->qinv_mem_head == sync_mem->qinv_mem_tail) {
354 		/* should never happen */
355 		ddi_err(DER_WARN, NULL, "sync mem exhausted");
356 		sync_mem->qinv_mem_tail = tail;
357 		mutex_exit(&sync_mem->qinv_mem_lock);
358 		delay(IMMU_ALLOC_RESOURCE_DELAY);
359 		goto sync_mem_exhausted;
360 	}
361 	mutex_exit(&sync_mem->qinv_mem_lock);
362 
363 	return (tail);
364 }
365 
366 /*
367  * queued invalidation interface -- invalidation wait descriptor
368  *   fence flag not set; status data is used to indicate completion
369  *   of the invalidation wait descriptor
370  */
371 static void
372 qinv_wait_async_unfence(immu_t *immu, qinv_iotlb_pend_node_t *node)
373 {
374 	qinv_dsc_t dsc;
375 	qinv_mem_t *sync_mem;
376 	uint64_t saddr;
377 	uint_t tail;
378 	qinv_t *qinv;
379 
380 	qinv = (qinv_t *)immu->immu_qinv;
381 	sync_mem = &qinv->qinv_sync;
382 	tail = qinv_alloc_sync_mem_entry(immu);
383 
384 	/* plant an iotlb pending node */
385 	qinv->qinv_iotlb_pend_node[tail] = node;
386 
387 	saddr = sync_mem->qinv_mem_paddr + tail * QINV_SYNC_DATA_SIZE;
388 
389 	/*
390 	 * sdata = QINV_SYNC_DATA_UNFENCE, fence = 0, sw = 1, if = 0
391 	 * indicate the invalidation wait descriptor completion by
392 	 * performing a coherent DWORD write to the status address,
393 	 * not by generating an invalidation completion event
394 	 */
395 	dsc.lo = INV_WAIT_DSC_LOW(QINV_SYNC_DATA_UNFENCE, 0, 1, 0);
396 	dsc.hi = INV_WAIT_DSC_HIGH(saddr);
397 
398 	qinv_submit_inv_dsc(immu, &dsc);
399 }
400 
401 /*
402  * queued invalidation interface -- invalidation wait descriptor
403  *   fence flag set, indicating that descriptors following the
404  *   invalidation wait descriptor must be processed by hardware
405  *   only after the invalidation wait descriptor completes.
406  */
407 static void
408 qinv_wait_async_fence(immu_t *immu)
409 {
410 	qinv_dsc_t dsc;
411 
412 	/* sw = 0, fence = 1, iflag = 0 */
413 	dsc.lo = INV_WAIT_DSC_LOW(0, 1, 0, 0);
414 	dsc.hi = 0;
415 	qinv_submit_inv_dsc(immu, &dsc);
416 }
417 
418 /*
419  * queued invalidation interface -- invalidation wait descriptor
420  *   wait until the invalidation request has finished
421  */
422 static void
423 qinv_wait_sync(immu_t *immu)
424 {
425 	qinv_dsc_t dsc;
426 	qinv_mem_t *sync_mem;
427 	uint64_t saddr;
428 	uint_t tail;
429 	qinv_t *qinv;
430 	volatile uint32_t *status;
431 
432 	qinv = (qinv_t *)immu->immu_qinv;
433 	sync_mem = &qinv->qinv_sync;
434 	tail = qinv_alloc_sync_mem_entry(immu);
435 	saddr = sync_mem->qinv_mem_paddr + tail * QINV_SYNC_DATA_SIZE;
436 	status = (uint32_t *)(sync_mem->qinv_mem_vaddr + tail *
437 	    QINV_SYNC_DATA_SIZE);
438 
439 	/*
440 	 * sdata = QINV_SYNC_DATA_FENCE, fence = 1, sw = 1, if = 0
441 	 * indicate the invalidation wait descriptor completion by
442 	 * performing a coherent DWORD write to the status address,
443 	 * not by generating an invalidation completion event
444 	 */
445 	dsc.lo = INV_WAIT_DSC_LOW(QINV_SYNC_DATA_FENCE, 1, 1, 0);
446 	dsc.hi = INV_WAIT_DSC_HIGH(saddr);
447 
448 	qinv_submit_inv_dsc(immu, &dsc);
449 
450 	while ((*status) != QINV_SYNC_DATA_FENCE)
451 		iommu_cpu_nop();
452 	*status = QINV_SYNC_DATA_UNFENCE;
453 }
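
/*
 * Status slot protocol used above and in qinv_wait_async_finish():
 * hardware writes the 32-bit status data of the wait descriptor
 * (QINV_SYNC_DATA_FENCE for synchronous waits, QINV_SYNC_DATA_UNFENCE
 * for asynchronous ones) to the slot on completion.  A synchronous
 * waiter polls for QINV_SYNC_DATA_FENCE and then overwrites the slot
 * with QINV_SYNC_DATA_UNFENCE so that the asynchronous reap path can
 * later advance the sync memory head past it.
 */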
454 
455 /* get already completed invalidation wait requests */
456 static int
457 qinv_wait_async_finish(immu_t *immu, int *cnt)
458 {
459 	qinv_mem_t *sync_mem;
460 	int index;
461 	qinv_t *qinv;
462 	volatile uint32_t *value;
463 
464 	ASSERT((*cnt) == 0);
465 
466 	qinv = (qinv_t *)immu->immu_qinv;
467 	sync_mem = &qinv->qinv_sync;
468 
469 	mutex_enter(&sync_mem->qinv_mem_lock);
470 	index = sync_mem->qinv_mem_head;
471 	value = (uint32_t *)(sync_mem->qinv_mem_vaddr + index
472 	    * QINV_SYNC_DATA_SIZE);
473 	while (*value == QINV_SYNC_DATA_UNFENCE) {
474 		*value = 0;
475 		(*cnt)++;
476 		sync_mem->qinv_mem_head++;
477 		if (sync_mem->qinv_mem_head == sync_mem->qinv_mem_size) {
478 			sync_mem->qinv_mem_head = 0;
479 			value = (uint32_t *)(sync_mem->qinv_mem_vaddr);
480 		} else
481 			value = (uint32_t *)((char *)value +
482 			    QINV_SYNC_DATA_SIZE);
483 	}
484 
485 	mutex_exit(&sync_mem->qinv_mem_lock);
486 	if ((*cnt) > 0)
487 		return (index);
488 	else
489 		return (-1);
490 }
491 
492 /*
493  * call ddi_dma_mem_alloc to allocate physically contiguous
494  * pages for the invalidation queue table
495  */
496 static int
497 qinv_setup(immu_t *immu)
498 {
499 	qinv_t *qinv;
500 	size_t size;
501 
502 	ddi_dma_attr_t qinv_dma_attr = {
503 		DMA_ATTR_V0,
504 		0U,
505 		0xffffffffffffffffULL,
506 		0xffffffffU,
507 		MMU_PAGESIZE, /* page aligned */
508 		0x1,
509 		0x1,
510 		0xffffffffU,
511 		0xffffffffffffffffULL,
512 		1,
513 		4,
514 		0
515 	};
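
	/*
	 * The positional initializer above corresponds to: addr_lo 0,
	 * addr_hi 0xffffffffffffffff, count_max 0xffffffff, alignment
	 * MMU_PAGESIZE, burstsizes 1, minxfer 1, maxxfer 0xffffffff,
	 * seg 0xffffffffffffffff, sgllen 1 (a single scatter/gather
	 * segment, i.e. physically contiguous memory), granularity 4,
	 * and flags 0.
	 */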
516 
517 	ddi_device_acc_attr_t qinv_acc_attr = {
518 		DDI_DEVICE_ATTR_V0,
519 		DDI_NEVERSWAP_ACC,
520 		DDI_STRICTORDER_ACC
521 	};
522 
523 	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);
524 
525 
526 	mutex_enter(&(immu->immu_qinv_lock));
527 
528 	immu->immu_qinv = NULL;
529 	if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
530 	    immu_qinv_enable == B_FALSE) {
531 		mutex_exit(&(immu->immu_qinv_lock));
532 		return (DDI_SUCCESS);
533 	}
534 
535 	if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
536 		qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;
537 
538 	qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);
539 
540 	if (ddi_dma_alloc_handle(root_devinfo,
541 	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
542 	    &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
543 		ddi_err(DER_WARN, root_devinfo,
544 		    "alloc invalidation queue table handle failed");
545 		goto queue_table_handle_failed;
546 	}
547 
548 	if (ddi_dma_alloc_handle(root_devinfo,
549 	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
550 	    &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
551 		ddi_err(DER_WARN, root_devinfo,
552 		    "alloc invalidation queue sync mem handle failed");
553 		goto sync_table_handle_failed;
554 	}
555 
556 	qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
557 	size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;
558 
559 	/* alloc physically contiguous pages for the invalidation queue */
560 	if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
561 	    size,
562 	    &qinv_acc_attr,
563 	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
564 	    DDI_DMA_SLEEP,
565 	    NULL,
566 	    &(qinv->qinv_table.qinv_mem_vaddr),
567 	    &size,
568 	    &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
569 		ddi_err(DER_WARN, root_devinfo,
570 		    "alloc invalidation queue table failed");
571 		goto queue_table_mem_failed;
572 	}
573 
574 	ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
575 	bzero(qinv->qinv_table.qinv_mem_vaddr, size);
576 
577 	/* get the base physical address of invalidation request queue */
578 	qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
579 	    hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));
580 
581 	qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;
582 
583 	qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
584 	size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;
585 
586 	/* alloc status memory for invalidation wait descriptor */
587 	if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
588 	    size,
589 	    &qinv_acc_attr,
590 	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
591 	    DDI_DMA_SLEEP,
592 	    NULL,
593 	    &(qinv->qinv_sync.qinv_mem_vaddr),
594 	    &size,
595 	    &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
596 		ddi_err(DER_WARN, root_devinfo,
597 		    "alloc invalidation queue sync mem failed");
598 		goto sync_table_mem_failed;
599 	}
600 
601 	ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
602 	bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
603 	qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
604 	    hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));
605 
606 	qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;
607 
608 	mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
609 	mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
610 
	/* initialize the pending iotlb flush node cache list and its lock */
	mutex_init(&(qinv->qinv_pend_head.ich_mem_lock), NULL,
	    MUTEX_DRIVER, NULL);
	list_create(&(qinv->qinv_pend_head.ich_mem_list),
	    sizeof (qinv_iotlb_pend_node_t),
	    offsetof(qinv_iotlb_pend_node_t, node));

611 	/*
612 	 * initialize the iotlb pending node array used when submitting
613 	 * iotlb invalidation queue requests
614 	 */
615 	qinv->qinv_iotlb_pend_node = (qinv_iotlb_pend_node_t **)
616 	    kmem_zalloc(qinv->qinv_sync.qinv_mem_size
617 	    * sizeof (qinv_iotlb_pend_node_t *), KM_SLEEP);
618 
619 	/* set invalidation queue structure */
620 	immu->immu_qinv = qinv;
621 
622 	mutex_exit(&(immu->immu_qinv_lock));
623 
624 	return (DDI_SUCCESS);
625 
626 sync_table_mem_failed:
627 	ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));
628 
629 queue_table_mem_failed:
630 	ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));
631 
632 sync_table_handle_failed:
633 	ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));
634 
635 queue_table_handle_failed:
636 	kmem_free(qinv, sizeof (qinv_t));
637 
638 	mutex_exit(&(immu->immu_qinv_lock));
639 
640 	return (DDI_FAILURE);
641 }
642 
643 /*
644  * ###########################################################################
645  *
646  * Functions exported by immu_qinv.c
647  *
648  * ###########################################################################
649  */
650 
651 /*
652  * initialize invalidation request queue structure.
653  */
654 int
655 immu_qinv_setup(list_t *listp)
656 {
657 	immu_t *immu;
658 	int nerr;
659 
660 	if (immu_qinv_enable == B_FALSE) {
661 		return (DDI_FAILURE);
662 	}
663 
664 	nerr = 0;
665 	immu = list_head(listp);
666 	for (; immu; immu = list_next(listp, immu)) {
667 		if (qinv_setup(immu) == DDI_SUCCESS) {
668 			immu->immu_qinv_setup = B_TRUE;
669 		} else {
670 			nerr++;
671 			break;
672 		}
673 	}
674 
675 	return (nerr > 0 ? DDI_FAILURE : DDI_SUCCESS);
676 }
677 
678 void
679 immu_qinv_startup(immu_t *immu)
680 {
681 	qinv_t *qinv;
682 	uint64_t qinv_reg_value;
683 
684 	if (immu->immu_qinv_setup == B_FALSE) {
685 		return;
686 	}
687 
688 	qinv = (qinv_t *)immu->immu_qinv;
689 	qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
690 	immu_regs_qinv_enable(immu, qinv_reg_value);
691 	immu->immu_flushops = &immu_qinv_flushops;
692 	immu->immu_qinv_running = B_TRUE;
693 }
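
/*
 * The register value programmed above combines the page-aligned
 * physical base of the queue with the QS (queue size) field in the
 * low bits of the Invalidation Queue Address Register, per the VT-d
 * specification.  For example (hypothetical address), a queue based
 * at physical 0x12340000 with qinv_iqa_qs == 6 is programmed as
 * 0x12340006.
 */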
694 
695 /*
696  * queued invalidation interface
697  *   function based context cache invalidation
698  */
699 void
700 immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
701     uint16_t source_id, uint_t domain_id)
702 {
703 	qinv_context_common(immu, function_mask, source_id,
704 	    domain_id, CTT_INV_G_DEVICE);
705 	qinv_wait_sync(immu);
706 }
707 
708 /*
709  * queued invalidation interface
710  *   domain based context cache invalidation
711  */
712 void
713 immu_qinv_context_dsi(immu_t *immu, uint_t domain_id)
714 {
715 	qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
716 	qinv_wait_sync(immu);
717 }
718 
719 /*
720  * queued invalidation interface
721  *   global context cache invalidation
722  */
723 void
724 immu_qinv_context_gbl(immu_t *immu)
725 {
726 	qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
727 	qinv_wait_sync(immu);
728 }
729 
730 /*
731  * queued invalidation interface
732  *   page based iotlb invalidation
733  */
734 void
735 immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
736 	uint64_t dvma, uint_t count, uint_t hint)
737 {
738 	uint_t am = 0;
739 	uint_t max_am;
740 
741 	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
742 
743 	/* choose page-selective invalidation */
744 	if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
745 		while (am <= max_am) {
746 			if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
747 			    <= ADDR_AM_MAX(am)) {
748 				qinv_iotlb_common(immu, domain_id,
749 				    dvma, am, hint, TLB_INV_G_PAGE);
750 				break;
751 			}
752 			am++;
753 		}
754 		if (am > max_am) {
755 			qinv_iotlb_common(immu, domain_id,
756 			    dvma, 0, hint, TLB_INV_G_DOMAIN);
757 		}
758 
759 	/* fall back to domain-selective invalidation */
760 	} else {
761 		qinv_iotlb_common(immu, domain_id, dvma,
762 		    0, hint, TLB_INV_G_DOMAIN);
763 	}
764 }
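
/*
 * Worked example for the address-mask search above: to flush 16 pages
 * starting at a dvma whose page frame number is a multiple of 16, the
 * loop stops at am == 4, since a 2^4 == 16 page aligned block covers
 * the whole range and a single page-selective descriptor is queued.
 * If no am up to the hardware maximum (MAMV) covers the range, or if
 * PSI is not supported at all, the code falls back to a
 * domain-selective invalidation.
 */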
765 
766 /*
767  * queued invalidation interface
768  *   domain based iotlb invalidation
769  */
770 void
771 immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id)
772 {
773 	qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
774 	qinv_wait_sync(immu);
775 }
776 
777 /*
778  * queued invalidation interface
779  *    global iotlb invalidation
780  */
781 void
782 immu_qinv_iotlb_gbl(immu_t *immu)
783 {
784 	qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
785 	qinv_wait_sync(immu);
786 }
787 
788 
789 
790 /*
791  * the plant wait operation for queued invalidation interface
792  */
793 void
794 immu_qinv_plant(immu_t *immu, dvcookie_t *dvcookies,
795 	uint_t count, uint_t array_size)
796 {
797 	qinv_t *qinv;
798 	qinv_iotlb_pend_node_t *node = NULL;
799 	qinv_iotlb_pend_head_t *head;
800 
801 	qinv = (qinv_t *)immu->immu_qinv;
802 
803 	head = &(qinv->qinv_pend_head);
804 	mutex_enter(&(head->ich_mem_lock));
805 	node = list_head(&(head->ich_mem_list));
806 	if (node) {
807 		list_remove(&(head->ich_mem_list), node);
808 	}
809 	mutex_exit(&(head->ich_mem_lock));
810 
811 	/* no cache, alloc one */
812 	if (node == NULL) {
813 		node = kmem_zalloc(sizeof (qinv_iotlb_pend_node_t), KM_SLEEP);
814 	}
815 	node->icn_dvcookies = dvcookies;
816 	node->icn_count = count;
817 	node->icn_array_size = array_size;
818 
819 	/* plant an invalidation wait descriptor; do not wait for its completion */
820 	qinv_wait_async_unfence(immu, node);
821 }
822 
823 /*
824  * the reap wait operation for queued invalidation interface
825  */
826 void
827 immu_qinv_reap(immu_t *immu)
828 {
829 	int index, cnt = 0;
830 	qinv_iotlb_pend_node_t *node;
831 	qinv_iotlb_pend_head_t *head;
832 	qinv_t *qinv;
833 
834 	qinv = (qinv_t *)immu->immu_qinv;
835 	head = &(qinv->qinv_pend_head);
836 
837 	index = qinv_wait_async_finish(immu, &cnt);
838 
839 	while (cnt--) {
840 		node = qinv->qinv_iotlb_pend_node[index];
841 		if (node != NULL) {
842 			mutex_enter(&(head->ich_mem_lock));
843 			list_insert_head(&(head->ich_mem_list), node);
844 			mutex_exit(&(head->ich_mem_lock));
845 			qinv->qinv_iotlb_pend_node[index] = NULL;
846 		}
847 		index++;	/* always advance to the next sync slot */
848 		if (index == qinv->qinv_sync.qinv_mem_size)
849 			index = 0;
850 	}
851 }
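
/*
 * Typical (hypothetical) usage of the plant/reap pair by a DVMA unmap
 * path: after queueing the iotlb invalidations for a batch of dvma
 * cookies, the caller plants an asynchronous wait that records the
 * cookie array; a later immu_qinv_reap() call returns the pending
 * nodes whose wait descriptors have completed to the free cache, at
 * which point the recorded cookies are known to be flushed from the
 * IOTLB.
 */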
852 
853 
854 /* queued invalidation interface -- global invalidate interrupt entry cache */
855 void
856 immu_qinv_intr_global(immu_t *immu)
857 {
858 	qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
859 	qinv_wait_sync(immu);
860 }
861 
862 /* queued invalidation interface -- invalidate single interrupt entry cache */
863 void
864 immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx)
865 {
866 	qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
867 	qinv_wait_sync(immu);
868 }
869 
870 /* queued invalidation interface -- invalidate interrupt entry caches */
871 void
872 immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt)
873 {
874 	uint_t	i, mask = 0;
875 
876 	ASSERT(cnt != 0);
877 
878 	/* requested interrupt count is not a power of 2 */
879 	if (!ISP2(cnt)) {
880 		for (i = 0; i < cnt; i++) {
881 			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
882 		}
883 		qinv_wait_sync(immu);
884 		return;
885 	}
886 
887 	while ((1 << mask) < cnt) {
888 		mask++;
889 	}
890 
891 	if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
892 		for (i = 0; i < cnt; i++) {
893 			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
894 		}
895 		qinv_wait_sync(immu);
896 		return;
897 	}
898 
899 	qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);
900 
901 	qinv_wait_sync(immu);
902 }
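
/*
 * Worked example: invalidating 4 contiguous IRTE entries whose base
 * index is 4-aligned yields mask == 2, and a single descriptor with
 * IM == 2 covers 1 << 2 == 4 entries per the VT-d index-mask
 * definition.  When cnt is not a power of two, or the mask exceeds
 * the hardware's MHMV limit, the entries are invalidated one at a
 * time instead.
 */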
903 
904 void
905 immu_qinv_report_fault(immu_t *immu)
906 {
907 	uint16_t head;
908 	qinv_dsc_t *dsc;
909 	qinv_t *qinv;
910 
911 	/* access qinv data */
912 	mutex_enter(&(immu->immu_qinv_lock));
913 
914 	qinv = (qinv_t *)(immu->immu_qinv);
915 
916 	head = QINV_IQA_HEAD(
917 	    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
918 
919 	dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
920 	    + (head * QINV_ENTRY_SIZE));
921 
922 	/* report the error */
923 	ddi_err(DER_WARN, immu->immu_dip,
924 	    "generated a fault when fetching a descriptor from the"
925 	    "\tinvalidation queue, or detected that the fetched"
926 	    "\tdescriptor is invalid. The head register is "
927 	    "0x%" PRIx64
928 	    "\tthe type is %s",
929 	    (uint64_t)head,
930 	    qinv_dsc_type[MIN(INV_DSC_TYPE(dsc), QINV_MAX_DSC_TYPE - 1)]);
931 
932 	mutex_exit(&(immu->immu_qinv_lock));
933 }
934