xref: /titanic_52/usr/src/uts/i86pc/io/immu_qinv.c (revision 3c112a2b34403220c06c3e2fcac403358cfba168)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright (c) 2010, Oracle and/or its affiliates.
23  * All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 #include <sys/ddi.h>
32 #include <sys/archsystm.h>
33 #include <vm/hat_i86.h>
34 #include <sys/types.h>
35 #include <sys/sysmacros.h>
36 #include <sys/immu.h>
37 
38 /* invalidation queue table entry size */
39 #define	QINV_ENTRY_SIZE		0x10
40 
41 /* max value of Queue Size field of Invalidation Queue Address Register */
42 #define	QINV_MAX_QUEUE_SIZE	0x7
43 
44 /* status data size of invalidation wait descriptor */
45 #define	QINV_SYNC_DATA_SIZE	0x4
46 
47 /* status data value of invalidation wait descriptor */
48 #define	QINV_SYNC_DATA_FENCE	1
49 #define	QINV_SYNC_DATA_UNFENCE	2
50 
51 /* invalidation queue head and tail */
52 #define	QINV_IQA_HEAD(QH)	BITX((QH), 18, 4)
53 #define	QINV_IQA_TAIL_SHIFT	4
54 
55 /* invalidation queue entry structure */
56 typedef struct qinv_inv_dsc {
57 	uint64_t	lo;
58 	uint64_t	hi;
59 } qinv_dsc_t;
60 
61 /*
62  * struct iotlb_pend_node
63  *   the pending data for an iotlb flush
64  */
65 typedef struct iotlb_pend_node {
66 	dvcookie_t	*icn_dvcookies;  /* ptr to dvma cookie array */
67 	uint_t		icn_count;  /* valid cookie count */
68 	uint_t		icn_array_size;  /* array size */
69 	list_node_t	node;
70 } qinv_iotlb_pend_node_t;
71 
72 /*
73  * struct iotlb_pend_head
74  *   the head of the cached iotlb pend node list
75  */
76 typedef struct iotlb_pend_head {
77 	/* the pending node cache list */
78 	kmutex_t	ich_mem_lock;
79 	list_t		ich_mem_list;
80 } qinv_iotlb_pend_head_t;
81 
82 /*
83  * qinv_iotlb_t
84  *   pending data for a queued invalidation iotlb flush
85  */
86 typedef struct qinv_iotlb {
87 	dvcookie_t	*qinv_iotlb_dvcookies;
88 	uint_t		qinv_iotlb_count;
89 	uint_t		qinv_iotlb_size;
90 	list_node_t	qinv_iotlb_node;
91 } qinv_iotlb_t;
92 
93 /* physically contiguous pages for the invalidation queue */
94 typedef struct qinv_mem {
95 	kmutex_t	   qinv_mem_lock;
96 	ddi_dma_handle_t   qinv_mem_dma_hdl;
97 	ddi_acc_handle_t   qinv_mem_acc_hdl;
98 	caddr_t		   qinv_mem_vaddr;
99 	paddr_t		   qinv_mem_paddr;
100 	uint_t		   qinv_mem_size;
101 	uint16_t	   qinv_mem_head;
102 	uint16_t	   qinv_mem_tail;
103 } qinv_mem_t;
104 
105 
106 /*
107  * invalidation queue state
108  *   This structure describes the state of the invalidation queue
109  *   table and the related status memory used by invalidation
110  *   wait descriptors
111  *
112  * qinv_table		- invalidation queue table
113  * qinv_sync		- sync status memory for invalidation wait descriptors
114  * qinv_iotlb_pend_node	- pending iotlb nodes, indexed by sync slot
115  */
116 typedef struct qinv {
117 	qinv_mem_t		qinv_table;
118 	qinv_mem_t		qinv_sync;
119 	qinv_iotlb_pend_head_t qinv_pend_head;
120 	qinv_iotlb_pend_node_t  **qinv_iotlb_pend_node;
121 } qinv_t;
122 
123 static struct immu_flushops immu_qinv_flushops = {
124 	immu_qinv_context_fsi,
125 	immu_qinv_context_dsi,
126 	immu_qinv_context_gbl,
127 	immu_qinv_iotlb_psi,
128 	immu_qinv_iotlb_dsi,
129 	immu_qinv_iotlb_gbl
130 };
131 
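/*
 * immu_qinv_startup() installs this table on immu->immu_flushops once
 * the hardware invalidation queue has been enabled, so the generic
 * flush entry points dispatch to the queued invalidation routines
 * defined in this file.
 */
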
132 /* helper macros for building queued invalidation descriptors */
133 #define	INV_DSC_TYPE(dsc)	((dsc)->lo & 0xF)
134 #define	CC_INV_DSC_HIGH		(0)
135 #define	CC_INV_DSC_LOW(fm, sid, did, g)	(((uint64_t)(fm) << 48) | \
136 	((uint64_t)(sid) << 32) | \
137 	((uint64_t)(did) << 16) | \
138 	((uint64_t)(g) << 4) | \
139 	1)
140 
141 #define	IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
142 	((uint64_t)(ih) << 6) |	\
143 	((uint64_t)(am)))
144 
145 #define	IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
146 	((uint64_t)(dr) << 7) | \
147 	((uint64_t)(dw) << 6) | \
148 	((uint64_t)(g) << 4) | \
149 	2)
150 
151 #define	DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))
152 
153 #define	DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
154 	((uint64_t)(sid) << 32) | \
155 	((uint64_t)(max_invs_pd) << 16) | \
156 	3)
157 
158 #define	IEC_INV_DSC_HIGH (0)
159 #define	IEC_INV_DSC_LOW(idx, im, g) (((uint64_t)(idx) << 32) | \
160 	((uint64_t)(im) << 27) | \
161 	((uint64_t)(g) << 4) | \
162 	4)
163 
164 #define	INV_WAIT_DSC_HIGH(saddr) ((uint64_t)(saddr))
165 
166 #define	INV_WAIT_DSC_LOW(sdata, fn, sw, iflag) (((uint64_t)(sdata) << 32) | \
167 	((uint64_t)(fn) << 6) | \
168 	((uint64_t)(sw) << 5) | \
169 	((uint64_t)(iflag) << 4) | \
170 	5)
171 
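/*
 * The low four bits of the first quadword of each descriptor carry the
 * descriptor type: the trailing literal in every *_DSC_LOW macro above
 * (1 context cache, 2 iotlb, 3 device-iotlb, 4 interrupt entry cache,
 * 5 invalidation wait).  INV_DSC_TYPE() extracts this field and the
 * qinv_dsc_type[] strings below are indexed by it.
 */
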
172 /*
173  * QS field of Invalidation Queue Address Register
174  * the size of the invalidation queue is 1 << (qinv_iqa_qs + 8) entries
175  */
176 static uint_t qinv_iqa_qs = 6;
177 
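/*
 * For example, with the default qinv_iqa_qs of 6 the queue holds
 * 1 << (6 + 8) = 16384 descriptors, i.e. 16384 * QINV_ENTRY_SIZE =
 * 256KB of physically contiguous memory.  qinv_setup() clamps the
 * value to QINV_MAX_QUEUE_SIZE.
 */
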
178 /*
179  * the invalidation descriptor types of the queued invalidation interface
180  */
181 static char *qinv_dsc_type[] = {
182 	"Reserved",
183 	"Context Cache Invalidate Descriptor",
184 	"IOTLB Invalidate Descriptor",
185 	"Device-IOTLB Invalidate Descriptor",
186 	"Interrupt Entry Cache Invalidate Descriptor",
187 	"Invalidation Wait Descriptor",
188 	"Incorrect queue invalidation type"
189 };
190 
191 #define	QINV_MAX_DSC_TYPE	(sizeof (qinv_dsc_type) / sizeof (char *))
192 
193 /*
194  * the queued invalidation interface functions
195  */
196 static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
197 static void qinv_context_common(immu_t *immu, uint8_t function_mask,
198     uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
199 static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
200     uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
201 static void qinv_iec_common(immu_t *immu, uint_t iidx,
202     uint_t im, uint_t g);
203 static uint_t qinv_alloc_sync_mem_entry(immu_t *immu);
204 static void qinv_wait_async_unfence(immu_t *immu,
205     qinv_iotlb_pend_node_t *node);
206 static void qinv_wait_sync(immu_t *immu);
207 static int qinv_wait_async_finish(immu_t *immu, int *count);
208 /*LINTED*/
209 static void qinv_wait_async_fence(immu_t *immu);
210 /*LINTED*/
211 static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
212     uint64_t addr, uint_t size, uint_t max_invs_pd);
213 
214 
215 /* submit an invalidation request descriptor to the invalidation queue */
216 static void
217 qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
218 {
219 	qinv_t *qinv;
220 	qinv_mem_t *qinv_table;
221 	uint_t tail;
222 
223 	qinv = (qinv_t *)immu->immu_qinv;
224 	qinv_table = &(qinv->qinv_table);
225 
226 	mutex_enter(&qinv_table->qinv_mem_lock);
227 	tail = qinv_table->qinv_mem_tail;
228 	qinv_table->qinv_mem_tail++;
229 
230 	if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
231 		qinv_table->qinv_mem_tail = 0;
232 
233 	while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
234 		/*
235 		 * inv queue table exhausted, wait for hardware to fetch
236 		 * the next descriptor
237 		 */
238 		qinv_table->qinv_mem_head = QINV_IQA_HEAD(
239 		    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
240 	}
241 
242 	bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
243 	    QINV_ENTRY_SIZE);
244 
245 	immu_regs_put64(immu, IMMU_REG_INVAL_QT,
246 	    qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);
247 
248 	mutex_exit(&qinv_table->qinv_mem_lock);
249 }
250 
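/*
 * The invalidation queue is a ring: software advances the tail
 * (modulo the queue size) and hardware advances the head as it
 * fetches descriptors.  The ring counts as full when advancing the
 * tail would make it equal to the head read back through
 * IMMU_REG_INVAL_QH, so one slot always stays unused.  Writing the
 * new tail to IMMU_REG_INVAL_QT is what hands the freshly copied
 * descriptor to hardware.
 */
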
251 /* queued invalidation interface -- invalidate context cache */
252 static void
253 qinv_context_common(immu_t *immu, uint8_t function_mask,
254     uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
255 {
256 	qinv_dsc_t dsc;
257 
258 	dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
259 	dsc.hi = CC_INV_DSC_HIGH;
260 
261 	qinv_submit_inv_dsc(immu, &dsc);
262 }
263 
264 /* queued invalidation interface -- invalidate iotlb */
265 static void
266 qinv_iotlb_common(immu_t *immu, uint_t domain_id,
267     uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
268 {
269 	qinv_dsc_t dsc;
270 	uint8_t dr = 0;
271 	uint8_t dw = 0;
272 
273 	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
274 		dr = 1;
275 	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
276 		dw = 1;
277 
278 	switch (type) {
279 	case TLB_INV_G_PAGE:
280 		if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
281 		    am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
282 		    addr & IMMU_PAGEOFFSET) {
283 			type = TLB_INV_G_DOMAIN;
284 			goto qinv_ignore_psi;
285 		}
286 		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
287 		dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
288 		break;
289 
290 	qinv_ignore_psi:
291 	case TLB_INV_G_DOMAIN:
292 		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
293 		dsc.hi = 0;
294 		break;
295 
296 	case TLB_INV_G_GLOBAL:
297 		dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
298 		dsc.hi = 0;
299 		break;
300 	default:
301 		ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
302 		return;
303 	}
304 
305 	qinv_submit_inv_dsc(immu, &dsc);
306 }
307 
308 /* queued invalidation interface -- invalidate dev_iotlb */
309 static void
310 qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
311     uint64_t addr, uint_t size, uint_t max_invs_pd)
312 {
313 	qinv_dsc_t dsc;
314 
315 	dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
316 	dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);
317 
318 	qinv_submit_inv_dsc(immu, &dsc);
319 }
320 
321 /* queued invalidation interface -- invalidate interrupt entry cache */
322 static void
323 qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
324 {
325 	qinv_dsc_t dsc;
326 
327 	dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
328 	dsc.hi = IEC_INV_DSC_HIGH;
329 
330 	qinv_submit_inv_dsc(immu, &dsc);
331 }
332 
333 /*
334  * allocate a free entry from the sync status table
335  */
336 static uint_t
337 qinv_alloc_sync_mem_entry(immu_t *immu)
338 {
339 	qinv_mem_t *sync_mem;
340 	uint_t tail;
341 	qinv_t *qinv;
342 
343 	qinv = (qinv_t *)immu->immu_qinv;
344 	sync_mem = &qinv->qinv_sync;
345 
346 sync_mem_exhausted:
347 	mutex_enter(&sync_mem->qinv_mem_lock);
348 	tail = sync_mem->qinv_mem_tail;
349 	sync_mem->qinv_mem_tail++;
350 	if (sync_mem->qinv_mem_tail == sync_mem->qinv_mem_size)
351 		sync_mem->qinv_mem_tail = 0;
352 
353 	if (sync_mem->qinv_mem_head == sync_mem->qinv_mem_tail) {
354 		/* should never happen */
355 		ddi_err(DER_WARN, NULL, "sync mem exhausted");
356 		sync_mem->qinv_mem_tail = tail;
357 		mutex_exit(&sync_mem->qinv_mem_lock);
358 		delay(IMMU_ALLOC_RESOURCE_DELAY);
359 		goto sync_mem_exhausted;
360 	}
361 	mutex_exit(&sync_mem->qinv_mem_lock);
362 
363 	return (tail);
364 }
365 
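/*
 * The index returned above doubles as the offset of the status DWORD
 * in the sync area (index * QINV_SYNC_DATA_SIZE) and as the slot in
 * the qinv_iotlb_pend_node[] array used by the unfenced wait path.
 * The slot is only reclaimed once qinv_wait_async_finish() advances
 * qinv_mem_head past it.
 */
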
366 /*
367  * queued invalidation interface -- invalidation wait descriptor
368  *   fence flag not set; status data is used to indicate completion
369  *   of the invalidation wait descriptor
370  */
371 static void
372 qinv_wait_async_unfence(immu_t *immu, qinv_iotlb_pend_node_t *node)
373 {
374 	qinv_dsc_t dsc;
375 	qinv_mem_t *sync_mem;
376 	uint64_t saddr;
377 	uint_t tail;
378 	qinv_t *qinv;
379 
380 	qinv = (qinv_t *)immu->immu_qinv;
381 	sync_mem = &qinv->qinv_sync;
382 	tail = qinv_alloc_sync_mem_entry(immu);
383 
384 	/* plant an iotlb pending node */
385 	qinv->qinv_iotlb_pend_node[tail] = node;
386 
387 	saddr = sync_mem->qinv_mem_paddr + tail * QINV_SYNC_DATA_SIZE;
388 
389 	/*
390 	 * sdata = QINV_SYNC_DATA_UNFENCE, fence = 0, sw = 1, if = 0
391 	 * indicate the invalidation wait descriptor completion by
392 	 * performing a coherent DWORD write to the status address,
393 	 * not by generating an invalidation completion event
394 	 */
395 	dsc.lo = INV_WAIT_DSC_LOW(QINV_SYNC_DATA_UNFENCE, 0, 1, 0);
396 	dsc.hi = INV_WAIT_DSC_HIGH(saddr);
397 
398 	qinv_submit_inv_dsc(immu, &dsc);
399 }
400 
401 /*
402  * queued invalidation interface -- invalidation wait descriptor
403  *   fence flag set, indicating that descriptors following the invalidation
404  *   wait descriptor must be processed by hardware only after the
405  *   invalidation wait descriptor completes.
406  */
407 static void
408 qinv_wait_async_fence(immu_t *immu)
409 {
410 	qinv_dsc_t dsc;
411 
412 	/* sw = 0, fence = 1, iflag = 0 */
413 	dsc.lo = INV_WAIT_DSC_LOW(0, 1, 0, 0);
414 	dsc.hi = 0;
415 	qinv_submit_inv_dsc(immu, &dsc);
416 }
417 
418 /*
419  * queued invalidation interface -- invalidation wait descriptor
420  *   wait until the invalidation request has finished
421  */
422 static void
423 qinv_wait_sync(immu_t *immu)
424 {
425 	qinv_dsc_t dsc;
426 	qinv_mem_t *sync_mem;
427 	uint64_t saddr;
428 	uint_t tail;
429 	qinv_t *qinv;
430 	volatile uint32_t *status;
431 
432 	qinv = (qinv_t *)immu->immu_qinv;
433 	sync_mem = &qinv->qinv_sync;
434 	tail = qinv_alloc_sync_mem_entry(immu);
435 	saddr = sync_mem->qinv_mem_paddr + tail * QINV_SYNC_DATA_SIZE;
436 	status = (uint32_t *)(sync_mem->qinv_mem_vaddr + tail *
437 	    QINV_SYNC_DATA_SIZE);
438 
439 	/*
440 	 * sdata = QINV_SYNC_DATA_FENCE, fence = 1, sw = 1, if = 0
441 	 * indicate the invalidation wait descriptor completion by
442 	 * performing a coherent DWORD write to the status address,
443 	 * not by generating an invalidation completion event
444 	 */
445 	dsc.lo = INV_WAIT_DSC_LOW(QINV_SYNC_DATA_FENCE, 1, 1, 0);
446 	dsc.hi = INV_WAIT_DSC_HIGH(saddr);
447 
448 	qinv_submit_inv_dsc(immu, &dsc);
449 
450 	while ((*status) != QINV_SYNC_DATA_FENCE)
451 		iommu_cpu_nop();
452 	*status = QINV_SYNC_DATA_UNFENCE;
453 }
454 
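/*
 * In the synchronous path above, hardware writes QINV_SYNC_DATA_FENCE
 * into the status slot when the wait descriptor completes.  The slot
 * is then overwritten with QINV_SYNC_DATA_UNFENCE so that
 * qinv_wait_async_finish() can later step the head past it; the
 * matching qinv_iotlb_pend_node[] entry stays NULL.
 */
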
455 /* get already completed invalidation wait requests */
456 static int
457 qinv_wait_async_finish(immu_t *immu, int *cnt)
458 {
459 	qinv_mem_t *sync_mem;
460 	int index;
461 	qinv_t *qinv;
462 	volatile uint32_t *value;
463 
464 	ASSERT((*cnt) == 0);
465 
466 	qinv = (qinv_t *)immu->immu_qinv;
467 	sync_mem = &qinv->qinv_sync;
468 
469 	mutex_enter(&sync_mem->qinv_mem_lock);
470 	index = sync_mem->qinv_mem_head;
471 	value = (uint32_t *)(sync_mem->qinv_mem_vaddr + index
472 	    * QINV_SYNC_DATA_SIZE);
473 	while (*value == QINV_SYNC_DATA_UNFENCE) {
474 		*value = 0;
475 		(*cnt)++;
476 		sync_mem->qinv_mem_head++;
477 		if (sync_mem->qinv_mem_head == sync_mem->qinv_mem_size) {
478 			sync_mem->qinv_mem_head = 0;
479 			value = (uint32_t *)(sync_mem->qinv_mem_vaddr);
480 		} else
481 			value = (uint32_t *)((char *)value +
482 			    QINV_SYNC_DATA_SIZE);
483 	}
484 
485 	mutex_exit(&sync_mem->qinv_mem_lock);
486 	if ((*cnt) > 0)
487 		return (index);
488 	else
489 		return (-1);
490 }
491 
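/*
 * qinv_wait_async_finish() returns the index of the first completed
 * sync slot and sets *cnt to the number of consecutive completed
 * slots, or returns -1 when nothing has completed.  immu_qinv_reap()
 * walks the qinv_iotlb_pend_node[] array starting at that index.
 */
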
492 /*
493  * call ddi_dma_mem_alloc to allocate physically contiguous
494  * pages for the invalidation queue table
495  */
496 static int
497 qinv_setup(immu_t *immu)
498 {
499 	qinv_t *qinv;
500 	size_t size;
501 
502 	ddi_dma_attr_t qinv_dma_attr = {
503 		DMA_ATTR_V0,
504 		0U,
505 		0xffffffffU,
506 		0xffffffffU,
507 		MMU_PAGESIZE, /* page aligned */
508 		0x1,
509 		0x1,
510 		0xffffffffU,
511 		0xffffffffU,
512 		1,
513 		4,
514 		0
515 	};
516 
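	/*
	 * The attribute above restricts the buffer to 32-bit physical
	 * addresses, requires page alignment and, by allowing only a
	 * single DMA cookie, forces the allocation to be physically
	 * contiguous.
	 */
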
517 	ddi_device_acc_attr_t qinv_acc_attr = {
518 		DDI_DEVICE_ATTR_V0,
519 		DDI_NEVERSWAP_ACC,
520 		DDI_STRICTORDER_ACC
521 	};
522 
523 	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);
524 
525 
526 	mutex_enter(&(immu->immu_qinv_lock));
527 
528 	immu->immu_qinv = NULL;
529 	if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
530 	    immu_qinv_enable == B_FALSE) {
531 		mutex_exit(&(immu->immu_qinv_lock));
532 		return (DDI_SUCCESS);
533 	}
534 
535 	if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
536 		qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;
537 
538 	qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);
539 
540 	if (ddi_dma_alloc_handle(root_devinfo,
541 	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
542 	    &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
543 		ddi_err(DER_WARN, root_devinfo,
544 		    "alloc invalidation queue table handle failed");
545 		goto queue_table_handle_failed;
546 	}
547 
548 	if (ddi_dma_alloc_handle(root_devinfo,
549 	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
550 	    &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
551 		ddi_err(DER_WARN, root_devinfo,
552 		    "alloc invalidation queue sync mem handle failed");
553 		goto sync_table_handle_failed;
554 	}
555 
556 	qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
557 	size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;
558 
559 	/* alloc physically contiguous pages for the invalidation queue */
560 	if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
561 	    size,
562 	    &qinv_acc_attr,
563 	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
564 	    DDI_DMA_SLEEP,
565 	    NULL,
566 	    &(qinv->qinv_table.qinv_mem_vaddr),
567 	    &size,
568 	    &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
569 		ddi_err(DER_WARN, root_devinfo,
570 		    "alloc invalidation queue table failed");
571 		goto queue_table_mem_failed;
572 	}
573 
574 	ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
575 	bzero(qinv->qinv_table.qinv_mem_vaddr, size);
576 
577 	/* get the base physical address of invalidation request queue */
578 	qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
579 	    hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));
580 
581 	qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;
582 
583 	qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
584 	size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;
585 
586 	/* alloc status memory for invalidation wait descriptor */
587 	if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
588 	    size,
589 	    &qinv_acc_attr,
590 	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
591 	    DDI_DMA_SLEEP,
592 	    NULL,
593 	    &(qinv->qinv_sync.qinv_mem_vaddr),
594 	    &size,
595 	    &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
596 		ddi_err(DER_WARN, root_devinfo,
597 		    "alloc invalidation queue sync mem failed");
598 		goto sync_table_mem_failed;
599 	}
600 
601 	ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
602 	bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
603 	qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
604 	    hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));
605 
606 	qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;
607 
608 	mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
609 	mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
610 
611 	/*
612 	 * init the iotlb pend node array used when submitting iotlb
613 	 * invalidation queue requests
614 	 */
615 	qinv->qinv_iotlb_pend_node = (qinv_iotlb_pend_node_t **)
616 	    kmem_zalloc(qinv->qinv_sync.qinv_mem_size
617 	    * sizeof (qinv_iotlb_pend_node_t *), KM_SLEEP);
618 
619 	/* set invalidation queue structure */
620 	immu->immu_qinv = qinv;
621 
622 	mutex_exit(&(immu->immu_qinv_lock));
623 
624 	return (DDI_SUCCESS);
625 
626 sync_table_mem_failed:
627 	ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));
628 
629 queue_table_mem_failed:
630 	ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));
631 
632 sync_table_handle_failed:
633 	ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));
634 
635 queue_table_handle_failed:
636 	kmem_free(qinv, sizeof (qinv_t));
637 
638 	mutex_exit(&(immu->immu_qinv_lock));
639 
640 	return (DDI_FAILURE);
641 }
642 
643 /*
644  * ###########################################################################
645  *
646  * Functions exported by immu_qinv.c
647  *
648  * ###########################################################################
649  */
650 
651 /*
652  * initialize invalidation request queue structure.
653  */
654 void
655 immu_qinv_setup(list_t *listp)
656 {
657 	immu_t *immu;
658 
659 	if (immu_qinv_enable == B_FALSE) {
660 		return;
661 	}
662 
663 	immu = list_head(listp);
664 	for (; immu; immu = list_next(listp, immu)) {
665 		if (qinv_setup(immu) == DDI_SUCCESS) {
666 			immu->immu_qinv_setup = B_TRUE;
667 		}
668 	}
669 }
670 
671 void
672 immu_qinv_startup(immu_t *immu)
673 {
674 	qinv_t *qinv;
675 	uint64_t qinv_reg_value;
676 
677 	if (immu->immu_qinv_setup == B_FALSE) {
678 		return;
679 	}
680 
681 	qinv = (qinv_t *)immu->immu_qinv;
682 	qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
683 	immu_regs_qinv_enable(immu, qinv_reg_value);
684 	immu->immu_flushops = &immu_qinv_flushops;
685 	immu->immu_qinv_running = B_TRUE;
686 }
687 
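/*
 * qinv_reg_value in immu_qinv_startup() above packs the page aligned
 * physical address of the queue together with the QS (queue size)
 * field in the low-order bits of the Invalidation Queue Address
 * register; the OR works because the table was allocated page
 * aligned, leaving those low bits clear.
 */
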
688 /*
689  * queued invalidation interface
690  *   function based context cache invalidation
691  */
692 void
693 immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
694     uint16_t source_id, uint_t domain_id)
695 {
696 	qinv_context_common(immu, function_mask, source_id,
697 	    domain_id, CTT_INV_G_DEVICE);
698 	qinv_wait_sync(immu);
699 }
700 
701 /*
702  * queued invalidation interface
703  *   domain based context cache invalidation
704  */
705 void
706 immu_qinv_context_dsi(immu_t *immu, uint_t domain_id)
707 {
708 	qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
709 	qinv_wait_sync(immu);
710 }
711 
712 /*
713  * queued invalidation interface
714  *   global context cache invalidation
715  */
716 void
717 immu_qinv_context_gbl(immu_t *immu)
718 {
719 	qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
720 	qinv_wait_sync(immu);
721 }
722 
723 /*
724  * queued invalidation interface
725  *   page based iotlb invalidation
726  */
727 void
728 immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
729 	uint64_t dvma, uint_t count, uint_t hint)
730 {
731 	uint_t am = 0;
732 	uint_t max_am;
733 
734 	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
735 
736 	/* choose page selective invalidation */
737 	if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
738 		while (am <= max_am) {
739 			if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
740 			    <= ADDR_AM_MAX(am)) {
741 				qinv_iotlb_common(immu, domain_id,
742 				    dvma, am, hint, TLB_INV_G_PAGE);
743 				break;
744 			}
745 			am++;
746 		}
747 		if (am > max_am) {
748 			qinv_iotlb_common(immu, domain_id,
749 			    dvma, 0, hint, TLB_INV_G_DOMAIN);
750 		}
751 
752 	/* otherwise use domain selective invalidation */
753 	} else {
754 		qinv_iotlb_common(immu, domain_id, dvma,
755 		    0, hint, TLB_INV_G_DOMAIN);
756 	}
757 }
758 
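/*
 * The address mask (am) loop above picks the smallest am for which
 * the count pages starting at dvma fit inside one naturally aligned
 * 2^am-page region, as checked via ADDR_AM_OFFSET()/ADDR_AM_MAX().
 * For example, flushing 8 pages that start on an 8-page boundary uses
 * am = 3, covering the whole range with a single descriptor.  If no
 * mask up to the hardware limit (MAMV) fits, or PSI is not supported,
 * the code falls back to a domain selective invalidation.
 */
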
759 /*
760  * queued invalidation interface
761  *   domain based iotlb invalidation
762  */
763 void
764 immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id)
765 {
766 	qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
767 	qinv_wait_sync(immu);
768 }
769 
770 /*
771  * queued invalidation interface
772  *    global iotlb invalidation
773  */
774 void
775 immu_qinv_iotlb_gbl(immu_t *immu)
776 {
777 	qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
778 	qinv_wait_sync(immu);
779 }
780 
781 
782 
783 /*
784  * the plant wait operation for the queued invalidation interface
785  */
786 void
787 immu_qinv_plant(immu_t *immu, dvcookie_t *dvcookies,
788 	uint_t count, uint_t array_size)
789 {
790 	qinv_t *qinv;
791 	qinv_iotlb_pend_node_t *node = NULL;
792 	qinv_iotlb_pend_head_t *head;
793 
794 	qinv = (qinv_t *)immu->immu_qinv;
795 
796 	head = &(qinv->qinv_pend_head);
797 	mutex_enter(&(head->ich_mem_lock));
798 	node = list_head(&(head->ich_mem_list));
799 	if (node) {
800 		list_remove(&(head->ich_mem_list), node);
801 	}
802 	mutex_exit(&(head->ich_mem_lock));
803 
804 	/* no cached node available, alloc a new one */
805 	if (node == NULL) {
806 		node = kmem_zalloc(sizeof (qinv_iotlb_pend_node_t), KM_SLEEP);
807 	}
808 	node->icn_dvcookies = dvcookies;
809 	node->icn_count = count;
810 	node->icn_array_size = array_size;
811 
812 	/* plant an invalidation wait descriptor; don't wait for completion */
813 	qinv_wait_async_unfence(immu, node);
814 }
815 
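/*
 * Plant/reap protocol: immu_qinv_plant() stashes the caller's dvcookie
 * array in a pend node, plants the node in qinv_iotlb_pend_node[] at
 * the sync slot chosen by the unfenced wait descriptor, and returns
 * without waiting.  immu_qinv_reap() below collects the slots that
 * hardware has since completed and recycles their nodes onto
 * ich_mem_list; slots left NULL by qinv_wait_sync() are skipped.
 */
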
816 /*
817  * the reap wait operation for the queued invalidation interface
818  */
819 void
820 immu_qinv_reap(immu_t *immu)
821 {
822 	int index, cnt = 0;
823 	qinv_iotlb_pend_node_t *node;
824 	qinv_iotlb_pend_head_t *head;
825 	qinv_t *qinv;
826 
827 	qinv = (qinv_t *)immu->immu_qinv;
828 	head = &(qinv->qinv_pend_head);
829 
830 	index = qinv_wait_async_finish(immu, &cnt);
831 
832 	while (cnt--) {
833 		node = qinv->qinv_iotlb_pend_node[index];
834 		if (node != NULL) {
835 			mutex_enter(&(head->ich_mem_lock));
836 			list_insert_head(&(head->ich_mem_list), node);
837 			mutex_exit(&(head->ich_mem_lock));
838 			qinv->qinv_iotlb_pend_node[index] = NULL;
839 		}
840 		index++;
841 		if (index == qinv->qinv_sync.qinv_mem_size)
842 			index = 0;
843 	}
844 }
845 
846 
847 /* queued invalidation interface -- global invalidate interrupt entry cache */
848 void
849 immu_qinv_intr_global(immu_t *immu)
850 {
851 	qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
852 	qinv_wait_sync(immu);
853 }
854 
855 /* queued invalidation interface -- invalidate single interrupt entry cache */
856 void
857 immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx)
858 {
859 	qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
860 	qinv_wait_sync(immu);
861 }
862 
863 /* queued invalidation interface -- invalidate interrupt entry caches */
864 void
865 immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt)
866 {
867 	uint_t	i, mask = 0;
868 
869 	ASSERT(cnt != 0);
870 
871 	/* requested interrupt count is not a power of 2 */
872 	if (!ISP2(cnt)) {
873 		for (i = 0; i < cnt; i++) {
874 			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
875 		}
876 		qinv_wait_sync(immu);
877 		return;
878 	}
879 
880 	while ((2 << mask) < cnt) {
881 		mask++;
882 	}
883 
884 	if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
885 		for (i = 0; i < cnt; i++) {
886 			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
887 		}
888 		qinv_wait_sync(immu);
889 		return;
890 	}
891 
892 	qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);
893 
894 	qinv_wait_sync(immu);
895 }
896 
897 void
898 immu_qinv_report_fault(immu_t *immu)
899 {
900 	uint16_t head;
901 	qinv_dsc_t *dsc;
902 	qinv_t *qinv;
903 
904 	/* access qinv data */
905 	mutex_enter(&(immu->immu_qinv_lock));
906 
907 	qinv = (qinv_t *)(immu->immu_qinv);
908 
909 	head = QINV_IQA_HEAD(
910 	    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
911 
912 	dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
913 	    + (head * QINV_ENTRY_SIZE));
914 
915 	/* report the error */
916 	ddi_err(DER_WARN, immu->immu_dip,
917 	    "generated a fault when fetching a descriptor from the"
918 	    "\tinvalidation queue, or detected that the fetched"
919 	    "\tdescriptor is invalid. The head register is "
920 	    "0x%x,"
921 	    "\tthe type is %s",
922 	    head,
923 	    qinv_dsc_type[MIN(INV_DSC_TYPE(dsc), QINV_MAX_DSC_TYPE - 1)]);
924 
925 	mutex_exit(&(immu->immu_qinv_lock));
926 }
927