xref: /titanic_41/usr/src/uts/intel/io/agpgart/agpgart.c (revision e5ba14ff435beeefdaa2e6649e175c74afe02c76)
1 /*
2  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 /*
6  * Portions Philip Brown phil@bolthole.com Dec 2001
7  */
8 
9 #pragma ident	"%Z%%M%	%I%	%E% SMI"
10 
11 /*
12  * agpgart driver
13  *
14  * This driver is primary targeted at providing memory support for INTEL
15  * AGP device, INTEL memory less video card, and AMD64 cpu GART devices.
16  * So there are four main architectures, ARC_IGD810, ARC_IGD830, ARC_INTELAGP,
17  * ARC_AMD64AGP, ARC_AMD64NOAGP to agpgart driver. However, the memory
18  * interfaces are the same for these architectures. The difference is how to
19  * manage the hardware GART table for them.
20  *
21  * For large memory allocation, this driver use direct mapping to userland
22  * application interface to save kernel virtual memory .
23  */
24 
25 #include <sys/types.h>
26 #include <sys/pci.h>
27 #include <sys/systm.h>
28 #include <sys/conf.h>
29 #include <sys/file.h>
30 #include <sys/kstat.h>
31 #include <sys/stat.h>
32 #include <sys/modctl.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/sunldi.h>
36 #include <sys/policy.h>
37 #include <sys/ddidevmap.h>
38 #include <vm/seg_dev.h>
39 #include <sys/pmem.h>
40 #include <sys/agpgart.h>
41 #include <sys/agp/agpdefs.h>
42 #include <sys/agp/agpgart_impl.h>
43 #include <sys/agp/agpamd64gart_io.h>
44 #include <sys/agp/agpmaster_io.h>
45 #include <sys/agp/agptarget_io.h>
46 
47 /* Dynamic debug support */
48 int agp_debug_var = 0;
49 #define	AGPDB_PRINT1(fmt)	if (agp_debug_var == 1) cmn_err fmt
50 #define	AGPDB_PRINT2(fmt)	if (agp_debug_var >= 1) cmn_err fmt
51 
52 /* Driver global softstate handle */
53 static void *agpgart_glob_soft_handle;
54 
55 #define	MAX_INSTNUM			16
56 
57 #define	AGP_DEV2INST(devt)	(getminor((devt)) >> 4)
58 #define	AGP_INST2MINOR(instance)	((instance) << 4)
59 #define	IS_INTEL_830(type)	((type) == ARC_IGD830)
60 #define	IS_TRUE_AGP(type)	(((type) == ARC_INTELAGP) || \
61 	((type) == ARC_AMD64AGP))
62 
63 #define	agpinfo_default_to_32(v, v32)	\
64 	{				\
65 		(v32).agpi32_version = (v).agpi_version;	\
66 		(v32).agpi32_devid = (v).agpi_devid;	\
67 		(v32).agpi32_mode = (v).agpi_mode;	\
68 		(v32).agpi32_aperbase = (v).agpi_aperbase;	\
69 		(v32).agpi32_apersize = (v).agpi_apersize;	\
70 		(v32).agpi32_pgtotal = (v).agpi_pgtotal;	\
71 		(v32).agpi32_pgsystem = (v).agpi_pgsystem;	\
72 		(v32).agpi32_pgused = (v).agpi_pgused;	\
73 	}
74 
75 static ddi_dma_attr_t agpgart_dma_attr = {
76 	DMA_ATTR_V0,
77 	0U,				/* dma_attr_addr_lo */
78 	0xffffffffU,			/* dma_attr_addr_hi */
79 	0xffffffffU,			/* dma_attr_count_max */
80 	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
81 	1,				/* dma_attr_burstsizes */
82 	1,				/* dma_attr_minxfer */
83 	0xffffffffU,			/* dma_attr_maxxfer */
84 	0xffffffffU,			/* dma_attr_seg */
85 	1,				/* dma_attr_sgllen, variable */
86 	4,				/* dma_attr_granular */
87 	0				/* dma_attr_flags */
88 };
89 
90 /*
91  * AMD64 supports gart table above 4G. See alloc_gart_table.
92  */
93 static ddi_dma_attr_t garttable_dma_attr = {
94 	DMA_ATTR_V0,
95 	0U,				/* dma_attr_addr_lo */
96 	0xffffffffU,			/* dma_attr_addr_hi */
97 	0xffffffffU,			/* dma_attr_count_max */
98 	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
99 	1,				/* dma_attr_burstsizes */
100 	1,				/* dma_attr_minxfer */
101 	0xffffffffU,			/* dma_attr_maxxfer */
102 	0xffffffffU,			/* dma_attr_seg */
103 	1,				/* dma_attr_sgllen, variable */
104 	4,				/* dma_attr_granular */
105 	0				/* dma_attr_flags */
106 };
107 
108 /*
109  * AGPGART table need a physical contiguous memory. To assure that
110  * each access to gart table is strongly ordered and uncachable,
111  * we use DDI_STRICTORDER_ACC.
112  */
113 static ddi_device_acc_attr_t gart_dev_acc_attr = {
114 	DDI_DEVICE_ATTR_V0,
115 	DDI_NEVERSWAP_ACC,
116 	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
117 };
118 
119 /*
120  * AGP memory is usually used as texture memory or for a framebuffer, so we
121  * can set the memory attribute to write combining. Video drivers will
122  * determine the frame buffer attributes, for example the memory is write
123  * combinging or non-cachable. However, the interface between Xorg and agpgart
124  * driver to support attribute selcetion doesn't exist yet. So we set agp memory
125  * to non-cachable by default now. This attribute might be overridden
126  * by MTTR in X86.
127  */
128 static ddi_device_acc_attr_t mem_dev_acc_attr = {
129 	DDI_DEVICE_ATTR_V0,
130 	DDI_NEVERSWAP_ACC,
131 	DDI_STRICTORDER_ACC	/* Can be DDI_MERGING_OK_ACC */
132 };
133 
134 static keytable_ent_t *
135 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
136 static void
137 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
138 
139 
140 static void
141 agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
142     offset_t off, size_t len, devmap_cookie_t new_handle1,
143     void **new_devprivate1, devmap_cookie_t new_handle2,
144     void **new_devprivate2)
145 {
146 
147 	struct keytable_ent *mementry;
148 	agpgart_softstate_t *softstate;
149 	agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;
150 
151 	ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
152 	ASSERT(devprivate);
153 	ASSERT(handle);
154 
155 	ctxp = (agpgart_ctx_t *)devprivate;
156 	softstate = ctxp->actx_sc;
157 	ASSERT(softstate);
158 
159 	if (new_handle1 != NULL) {
160 		newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
161 		newctxp1->actx_sc = softstate;
162 		newctxp1->actx_off = ctxp->actx_off;
163 		*new_devprivate1 = newctxp1;
164 	}
165 
166 	if (new_handle2 != NULL) {
167 		newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
168 		newctxp2->actx_sc = softstate;
169 		newctxp2->actx_off = off + len;
170 		*new_devprivate2 = newctxp2;
171 	}
172 
173 	mutex_enter(&softstate->asoft_instmutex);
174 	if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
175 		mementry =
176 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
177 		ASSERT(mementry);
178 		mementry->kte_refcnt--;
179 	} else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
180 		mementry =
181 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
182 		ASSERT(mementry);
183 		mementry->kte_refcnt++;
184 	}
185 	ASSERT(mementry->kte_refcnt >= 0);
186 	mutex_exit(&softstate->asoft_instmutex);
187 	kmem_free(ctxp, sizeof (struct agpgart_ctx));
188 }
189 
/*ARGSUSED*/
/*
 * agp_devmap_map()
 *
 * Devmap map callback: called when a userland process maps a range of
 * AGP memory.  Looks up the key table entry bound at the mapping's page
 * offset, bumps its reference count, and allocates a per-mapping
 * context recording the offset and owning softstate for later
 * dup/unmap callbacks.
 *
 * Returns:
 * 	0	success
 * 	ENXIO	driver soft state not found for this instance
 */
static int
agp_devmap_map(devmap_cookie_t handle, dev_t dev,
    uint_t flags, offset_t offset, size_t len, void **new_devprivate)
{
	agpgart_softstate_t *softstate;
	int instance;
	struct keytable_ent *mementry;
	agpgart_ctx_t *newctxp;

	ASSERT(handle);
	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
		return (ENXIO);
	}

	ASSERT(softstate);
	/* The caller is expected to hold the instance mutex */
	ASSERT(mutex_owned(&softstate->asoft_instmutex));
	ASSERT(len);
	ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));

	mementry =
	    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
	ASSERT(mementry);
	/* Pin the entry for the lifetime of this mapping */
	mementry->kte_refcnt++;
	ASSERT(mementry->kte_refcnt >= 0);
	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
	newctxp->actx_off = offset;
	newctxp->actx_sc = softstate;
	*new_devprivate = newctxp;

	return (0);
}
225 
/*ARGSUSED*/
/*
 * agp_devmap_dup()
 *
 * Devmap dup callback: invoked when an existing mapping is duplicated
 * (e.g. across fork).  Allocates a new per-mapping context sharing the
 * same offset and softstate, and takes an extra reference on the bound
 * key table entry so it stays pinned while either mapping exists.
 *
 * Returns:
 * 	0	success (allocation sleeps, so it cannot fail)
 */
static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
    devmap_cookie_t new_handle, void **new_devprivate)
{
	struct keytable_ent *mementry;
	agpgart_ctx_t *newctxp, *ctxp;
	agpgart_softstate_t *softstate;

	ASSERT(devprivate);
	ASSERT(handle && new_handle);

	ctxp = (agpgart_ctx_t *)devprivate;
	ASSERT(AGP_ALIGNED(ctxp->actx_off));

	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
	newctxp->actx_off = ctxp->actx_off;
	newctxp->actx_sc = ctxp->actx_sc;
	softstate = (agpgart_softstate_t *)newctxp->actx_sc;

	/* Reference count updates are serialized by the instance mutex */
	mutex_enter(&softstate->asoft_instmutex);
	mementry = agp_find_bound_keyent(softstate,
	    AGP_BYTES2PAGES(newctxp->actx_off));
	mementry->kte_refcnt++;
	ASSERT(mementry->kte_refcnt >= 0);
	mutex_exit(&softstate->asoft_instmutex);
	*new_devprivate = newctxp;

	return (0);
}
255 
/* Devmap callback table for userland mappings of AGP memory */
struct devmap_callback_ctl agp_devmap_cb = {
	DEVMAP_OPS_REV,		/* rev */
	agp_devmap_map,		/* map */
	NULL,			/* access */
	agp_devmap_dup,		/* dup */
	agp_devmap_unmap,	/* unmap */
};
263 
264 /*
265  * agp_master_regis_byname()
266  *
267  * Description:
268  * 	Open the AGP master device node by device path name and
269  * 	register the device handle for later operations.
270  * 	We check all possible driver instance from 0
271  * 	to MAX_INSTNUM because the master device could be
272  * 	at any instance number. Only one AGP master is supported.
273  *
274  * Arguments:
275  * 	master_hdlp		AGP master device LDI handle pointer
276  *	agpgart_l		AGPGART driver LDI identifier
277  *
278  * Returns:
279  * 	-1			failed
280  * 	0			success
281  */
282 static int
283 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
284 {
285 	int	i;
286 	char	buf[MAXPATHLEN];
287 
288 	ASSERT(master_hdlp);
289 	ASSERT(agpgart_li);
290 
291 	/*
292 	 * Search all possible instance numbers for the agp master device.
293 	 * Only one master device is supported now, so the search ends
294 	 * when one master device is found.
295 	 */
296 	for (i = 0; i < MAX_INSTNUM; i++) {
297 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
298 		if ((ldi_open_by_name(buf, 0, kcred,
299 		    master_hdlp, agpgart_li)))
300 			continue;
301 		AGPDB_PRINT1((CE_NOTE,
302 		    "master device found: instance number=%d", i));
303 		break;
304 
305 	}
306 
307 	/* AGP master device not found */
308 	if (i == MAX_INSTNUM)
309 		return (-1);
310 
311 	return (0);
312 }
313 
314 /*
315  * agp_target_regis_byname()
316  *
317  * Description:
318  * 	This function opens agp bridge device node by
319  * 	device path name and registers the device handle
320  * 	for later operations.
321  * 	We check driver instance from 0 to MAX_INSTNUM
322  * 	because the master device could be at any instance
323  * 	number. Only one agp target is supported.
324  *
325  *
326  * Arguments:
327  *	target_hdlp		AGP target device LDI handle pointer
328  *	agpgart_l		AGPGART driver LDI identifier
329  *
330  * Returns:
331  * 	-1			failed
332  * 	0			success
333  */
334 static int
335 agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
336 {
337 	int	i;
338 	char	buf[MAXPATHLEN];
339 
340 	ASSERT(target_hdlp);
341 	ASSERT(agpgart_li);
342 
343 	for (i = 0; i < MAX_INSTNUM; i++) {
344 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
345 		if ((ldi_open_by_name(buf, 0, kcred,
346 		    target_hdlp, agpgart_li)))
347 			continue;
348 
349 		AGPDB_PRINT1((CE_NOTE,
350 		    "bridge device found: instance number=%d", i));
351 		break;
352 
353 	}
354 
355 	/* AGP bridge device not found */
356 	if (i == MAX_INSTNUM) {
357 		AGPDB_PRINT2((CE_WARN, "bridge device not found"));
358 		return (-1);
359 	}
360 
361 	return (0);
362 }
363 
364 /*
365  * amd64_gart_regis_byname()
366  *
367  * Description:
 * 	Open all amd64 gart device nodes by device path name and
369  * 	register the device handles for later operations. Each cpu
370  * 	has its own amd64 gart device.
371  *
372  * Arguments:
373  * 	cpu_garts		cpu garts device list header
374  *	agpgart_l		AGPGART driver LDI identifier
375  *
376  * Returns:
377  * 	-1			failed
378  * 	0			success
379  */
static int
amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
{
	amd64_gart_dev_list_t	*gart_list;
	int			i;
	char			buf[MAXPATHLEN];
	ldi_handle_t		gart_hdl;
	int			ret;

	ASSERT(cpu_garts);
	ASSERT(agpgart_li);

	/*
	 * Search all possible instance numbers for the gart devices.
	 * There can be multiple on-cpu gart devices for Opteron server.
	 */
	for (i = 0; i < MAX_INSTNUM; i++) {
		(void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
		ret = ldi_open_by_name(buf, 0, kcred,
		    &gart_hdl, agpgart_li);

		if (ret == ENODEV)
			continue;	/* no device at this instance */
		else if (ret != 0) { /* There was an error opening the device */
			/* Unwind any garts already registered in this loop */
			amd64_gart_unregister(cpu_garts);
			return (ret);
		}

		AGPDB_PRINT1((CE_NOTE,
		    "amd64 gart device found: instance number=%d", i));

		gart_list = (amd64_gart_dev_list_t *)
		    kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);

		/* Add new item to the head of the gart device list */
		gart_list->gart_devhdl = gart_hdl;
		gart_list->next = cpu_garts->gart_dev_list_head;
		cpu_garts->gart_dev_list_head = gart_list;
		cpu_garts->gart_device_num++;
	}

	/* No on-cpu gart devices at all: this is not an amd64 system */
	if (cpu_garts->gart_device_num == 0)
		return (ENODEV);
	return (0);
}
425 
426 /*
427  * Unregister agp master device handle
428  */
429 static void
430 agp_master_unregister(ldi_handle_t *master_hdlp)
431 {
432 	ASSERT(master_hdlp);
433 
434 	if (master_hdlp) {
435 		(void) ldi_close(*master_hdlp, 0, kcred);
436 		*master_hdlp = NULL;
437 	}
438 }
439 
440 /*
441  * Unregister agp bridge device handle
442  */
443 static void
444 agp_target_unregister(ldi_handle_t *target_hdlp)
445 {
446 	if (target_hdlp) {
447 		(void) ldi_close(*target_hdlp, 0, kcred);
448 		*target_hdlp = NULL;
449 	}
450 }
451 
452 /*
453  * Unregister all amd64 gart device handles
454  */
455 static void
456 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
457 {
458 	amd64_gart_dev_list_t	*gart_list;
459 	amd64_gart_dev_list_t	*next;
460 
461 	ASSERT(cpu_garts);
462 
463 	for (gart_list = cpu_garts->gart_dev_list_head;
464 	    gart_list; gart_list = next) {
465 
466 		ASSERT(gart_list->gart_devhdl);
467 		(void) ldi_close(gart_list->gart_devhdl, 0, kcred);
468 		next = gart_list->next;
469 		/* Free allocated memory */
470 		kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
471 	}
472 	cpu_garts->gart_dev_list_head = NULL;
473 	cpu_garts->gart_device_num = 0;
474 }
475 
476 /*
477  * lyr_detect_master_type()
478  *
479  * Description:
480  * 	This function gets agp master type by querying agp master device.
481  *
482  * Arguments:
483  * 	master_hdlp		agp master device ldi handle pointer
484  *
485  * Returns:
486  * 	-1			unsupported device
487  * 	DEVICE_IS_I810		i810 series
 * 	DEVICE_IS_I830		i830 series
489  * 	DEVICE_IS_AGP		true agp master
490  */
491 static int
492 lyr_detect_master_type(ldi_handle_t *master_hdlp)
493 {
494 	int vtype;
495 	int err;
496 
497 	ASSERT(master_hdlp);
498 
499 	/* ldi_ioctl(agpmaster) */
500 	err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
501 	    (intptr_t)&vtype, FKIOCTL, kcred, 0);
502 	if (err) /* Unsupported graphics device */
503 		return (-1);
504 	return (vtype);
505 }
506 
507 /*
 * lyr_detect_target_type()
509  *
510  * Description:
511  * 	This function gets the host bridge chipset type by querying the agp
512  *	target device.
513  *
514  * Arguments:
515  * 	target_hdlp		agp target device LDI handle pointer
516  *
517  * Returns:
518  * 	CHIP_IS_INTEL		Intel agp chipsets
519  * 	CHIP_IS_AMD		AMD agp chipset
520  * 	-1			unsupported chipset
521  */
522 static int
523 lyr_detect_target_type(ldi_handle_t *target_hdlp)
524 {
525 	int btype;
526 	int err;
527 
528 	ASSERT(target_hdlp);
529 
530 	err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
531 	    FKIOCTL, kcred, 0);
532 	if (err)	/* Unsupported bridge device */
533 		return (-1);
534 	return (btype);
535 }
536 
537 /*
538  * lyr_init()
539  *
540  * Description:
541  * 	This function detects the  graphics system architecture and
542  * 	registers all relative device handles in a global structure
543  * 	"agp_regdev". Then it stores the system arc type in driver
544  * 	soft state.
545  *
546  * Arguments:
547  *	agp_regdev		AGP devices registration struct pointer
548  *	agpgart_l		AGPGART driver LDI identifier
549  *
550  * Returns:
551  * 	0	System arc supported and agp devices registration successed.
552  * 	-1	System arc not supported or device registration failed.
553  */
int
lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
{
	ldi_handle_t *master_hdlp;
	ldi_handle_t *target_hdlp;
	amd64_garts_dev_t *garts_dev;
	int card_type, chip_type;
	int ret;

	ASSERT(agp_regdev);

	bzero(agp_regdev, sizeof (agp_registered_dev_t));
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	/*
	 * Register agp devices, assuming all instances attached, and
	 * detect which agp architecture this server belongs to. This
	 * must be done before the agpgart driver starts to use layered
	 * driver interfaces.
	 */
	master_hdlp = &agp_regdev->agprd_masterhdl;
	target_hdlp = &agp_regdev->agprd_targethdl;
	garts_dev = &agp_regdev->agprd_cpugarts;

	/* Check whether the system is amd64 arc */
	if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
		/* No amd64 gart devices */
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: this is not an amd64 system"));
		if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register master device unsuccessful"));
			goto err1;
		}
		if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register target device unsuccessful"));
			goto err2;
		}
		card_type = lyr_detect_master_type(master_hdlp);
		/*
		 * Detect system arc by master device. If it is a intel
		 * integrated device, finish the detection successfully.
		 */
		switch (card_type) {
		case DEVICE_IS_I810:	/* I810 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 810 arch"));
			agp_regdev->agprd_arctype = ARC_IGD810;
			return (0);
		case DEVICE_IS_I830:	/* I830 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 830 arch"));
			agp_regdev->agprd_arctype = ARC_IGD830;
			return (0);
		case DEVICE_IS_AGP:	/* AGP graphics */
			break;
		default:		/* Non IGD/AGP graphics */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: non-supported master device"));
			goto err3;
		}

		chip_type = lyr_detect_target_type(target_hdlp);

		/* Continue to detect AGP arc by target device */
		switch (chip_type) {
		case CHIP_IS_INTEL:	/* Intel chipset */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: Intel AGP arch detected"));
			agp_regdev->agprd_arctype = ARC_INTELAGP;
			return (0);
		case CHIP_IS_AMD:	/* AMD chipset */
			/* AMD bridge without on-cpu garts is unsupported */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: no cpu gart, but have AMD64 chipsets"));
			goto err3;
		default:		/* Non supported chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: detection can not continue"));
			goto err3;
		}

	}

	/* Any error other than ENODEV means a gart open really failed */
	if (ret)
		return (-1); /* Errors in open amd64 cpu gart devices */

	/*
	 * AMD64 cpu gart device exists, continue detection
	 */

	if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: register master device unsuccessful"));

		agp_regdev->agprd_arctype = ARC_AMD64NOAGP;
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP master, but supports IOMMU in amd64"));
		return (0); /* Finished successfully */
	}

	if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: register target device unsuccessful"));

		agp_regdev->agprd_arctype = ARC_AMD64NOAGP;

		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP bridge, but supports IOMMU in amd64"));

		/* Master is not needed when there is no AGP bridge */
		agp_master_unregister(&agp_regdev->agprd_masterhdl);
		return (0); /* Finished successfully */

	}


	AGPDB_PRINT1((CE_NOTE,
	    "lyr_init: the system is AMD64 AGP architecture"));

	agp_regdev->agprd_arctype = ARC_AMD64AGP;

	return (0); /* Finished successfully */

/* Error unwinding: release whatever was registered before the failure */
err3:
	agp_target_unregister(&agp_regdev->agprd_targethdl);
err2:
	agp_master_unregister(&agp_regdev->agprd_masterhdl);
err1:
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	return (-1);
}
684 
685 void
686 lyr_end(agp_registered_dev_t *agp_regdev)
687 {
688 	ASSERT(agp_regdev);
689 
690 	switch (agp_regdev->agprd_arctype) {
691 	case ARC_IGD810:
692 	case ARC_IGD830:
693 	case ARC_INTELAGP:
694 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
695 		agp_target_unregister(&agp_regdev->agprd_targethdl);
696 
697 		return;
698 	case ARC_AMD64AGP:
699 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
700 		agp_target_unregister(&agp_regdev->agprd_targethdl);
701 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
702 
703 		return;
704 	case ARC_AMD64NOAGP:
705 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
706 
707 		return;
708 	default:
709 		ASSERT(0);
710 		return;
711 	}
712 }
713 
/*
 * lyr_get_info()
 *
 * Description:
 * 	Fill in the agp kernel info structure by querying the registered
 * 	agp devices, according to the architecture detected by
 * 	lyr_init().  Finally sanity-check the reported aperture.
 *
 * Arguments:
 * 	info		agp kernel info struct pointer to fill in
 *	agp_regdev	AGP devices registration struct pointer
 *
 * Returns:
 * 	0		success
 * 	-1		ioctl failure, unknown arc, or bad aperture
 */
int
lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
{
	ldi_handle_t hdl;
	igd_info_t value1;
	i_agp_info_t value2;
	amdgart_info_t value3;
	size_t prealloc_size;
	int err;
	amd64_gart_dev_list_t	*gart_head;

	ASSERT(info);
	ASSERT(agp_regdev);

	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
		/* Master device supplies devid and aperture geometry */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = value1.igd_apersize;

		/* Target device supplies the pre-allocated memory size */
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_presize = prealloc_size;

		break;

	case ARC_IGD830:
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);

		/*
		 * Assume all units are kilobytes unless explicitly
		 * stated below:
		 * preallocated GTT memory = preallocated memory - GTT size
		 * 	- scratch page size
		 *
		 * scratch page size = 4
		 * GTT size (KB) = aperture size (MB)
		 * this algorithm came from Xorg source code
		 */
		if (prealloc_size > (info->agpki_apersize + 4))
			prealloc_size =
			    prealloc_size - info->agpki_apersize - 4;
		else {
			AGPDB_PRINT2((CE_WARN, "lyr_get_info: "
			    "pre-allocated memory too small, setting to zero"));
			prealloc_size = 0;
		}
		info->agpki_presize = prealloc_size;
		/*
		 * NOTE(review): prealloc_size is a size_t printed with
		 * %ld -- confirm this matches on ILP32 kernels.
		 */
		AGPDB_PRINT2((CE_NOTE,
		    "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
		    prealloc_size, info->agpki_apersize));
		break;
	case ARC_INTELAGP:
	case ARC_AMD64AGP:
		/* AGP devices */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value2.iagp_devid;
		info->agpki_mver = value2.iagp_ver;
		info->agpki_mstatus = value2.iagp_mode;
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_tdevid = value2.iagp_devid;
		info->agpki_tver = value2.iagp_ver;
		info->agpki_tstatus = value2.iagp_mode;
		info->agpki_aperbase = value2.iagp_aperbase;
		info->agpki_apersize = value2.iagp_apersize;
		break;
	case ARC_AMD64NOAGP:
		/* Meaningful for IOMMU support only */
		gart_head = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		err = ldi_ioctl(gart_head->gart_devhdl, AMD64_GET_INFO,
		    (intptr_t)&value3, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_aperbase = value3.cgart_aperbase;
		info->agpki_apersize = value3.cgart_apersize;
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: function doesn't work for unknown arc"));
		return (-1);
	}
	/* Sanity check the aperture the hardware reported */
	if ((info->agpki_apersize >= MAXAPERMEGAS) ||
	    (info->agpki_apersize == 0) ||
	    (info->agpki_aperbase == 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: aperture is not programmed correctly!"));
		return (-1);
	}

	return (0);
}
834 
835 /*
836  * lyr_i8xx_add_to_gtt()
837  *
838  * Description:
839  * 	This function sets up the integrated video device gtt table
840  * 	via an ioclt to the AGP master driver.
841  *
842  * Arguments:
843  * 	pg_offset	The start entry to be setup
844  * 	keyent		Keytable entity pointer
845  *	agp_regdev	AGP devices registration struct pointer
846  *
847  * Returns:
848  * 	0		success
849  * 	-1		invalid operations
850  */
int
lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
    agp_registered_dev_t *agp_regdev)
{
	int err = 0;
	int rval;
	ldi_handle_t hdl;
	igd_gtt_seg_t gttseg;
	uint32_t *addrp, i;
	uint32_t npages;

	ASSERT(keyent);
	ASSERT(agp_regdev);
	/* Describe the GTT segment: start entry, page count, type */
	gttseg.igs_pgstart =  pg_offset;
	npages = keyent->kte_pages;
	gttseg.igs_npage = npages;
	gttseg.igs_type = keyent->kte_type;
	gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
	    (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);

	/* Convert each page frame number into a 32-bit GTT address */
	addrp = gttseg.igs_phyaddr;
	for (i = 0; i < npages; i++, addrp++) {
		*addrp =
		    (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
	}

	hdl = agp_regdev->agprd_masterhdl;
	if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)&gttseg, FKIOCTL,
	    kcred, &rval)) {
		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
		    gttseg.igs_pgstart));
		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
		    gttseg.igs_npage));
		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
		    gttseg.igs_type));
		err = -1;
	}
	/* The address array is only needed for the ioctl; free it now */
	kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
	return (err);
}
892 
893 /*
894  * lyr_i8xx_remove_from_gtt()
895  *
896  * Description:
897  * 	This function clears the integrated video device gtt table via
898  * 	an ioctl to the agp master device.
899  *
900  * Arguments:
901  * 	pg_offset	The starting entry to be cleared
902  * 	npage		The number of entries to be cleared
903  *	agp_regdev	AGP devices struct pointer
904  *
905  * Returns:
906  * 	0		success
907  * 	-1		invalid operations
908  */
909 int
910 lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
911     agp_registered_dev_t *agp_regdev)
912 {
913 	int			rval;
914 	ldi_handle_t		hdl;
915 	igd_gtt_seg_t		gttseg;
916 
917 	gttseg.igs_pgstart =  pg_offset;
918 	gttseg.igs_npage = npage;
919 
920 	hdl = agp_regdev->agprd_masterhdl;
921 	if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
922 	    kcred, &rval))
923 		return (-1);
924 
925 	return (0);
926 }
927 
928 /*
929  * lyr_set_gart_addr()
930  *
931  * Description:
932  *	This function puts the gart table physical address in the
933  * 	gart base register.
934  *	Please refer to gart and gtt table base register format for
935  *	gart base register format in agpdefs.h.
936  *
937  * Arguments:
938  * 	phy_base	The base physical address of gart table
939  *	agp_regdev	AGP devices registration struct pointer
940  *
941  * Returns:
942  * 	0		success
943  * 	-1		failed
944  *
945  */
946 
int
lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			err = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
	{
		uint32_t base;

		/* i810 GTT base is a 32-bit register on the master */
		ASSERT((phy_base & ~I810_POINTER_MASK) == 0);
		base = (uint32_t)phy_base;

		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
		    (intptr_t)&base, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_INTELAGP:
	{
		uint32_t addr;
		addr = (uint32_t)phy_base;

		/* Intel AGP GATT base lives on the target (bridge) */
		ASSERT((phy_base & ~GTT_POINTER_MASK) == 0);
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
		    (intptr_t)&addr, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
	{
		uint32_t addr;

		/*
		 * Encode the physical base into the AMD64 gart base
		 * register format, then program every on-cpu gart.
		 */
		ASSERT((phy_base & ~AMD64_POINTER_MASK) == 0);
		addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
		    & AMD64_GARTBASE_MASK);

		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list;
		    gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
			    (intptr_t)&addr, FKIOCTL, kcred, 0)) {
				err = -1;
				break;
			}
		}
		break;
	}
	default:
		err = -1;
	}

	if (err)
		return (-1);

	return (0);
}
1009 
1010 int
1011 lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
1012 {
1013 	ldi_handle_t hdl;
1014 	uint32_t command;
1015 
1016 	ASSERT(agp_regdev);
1017 	command = cmd;
1018 	hdl = agp_regdev->agprd_targethdl;
1019 	if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
1020 	    (intptr_t)&command, FKIOCTL, kcred, 0))
1021 		return (-1);
1022 	hdl = agp_regdev->agprd_masterhdl;
1023 	if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
1024 	    (intptr_t)&command, FKIOCTL, kcred, 0))
1025 		return (-1);
1026 
1027 	return (0);
1028 }
1029 
/*
 * lyr_config_devices()
 *
 * Description:
 * 	Configure the registered agp devices for the detected
 * 	architecture via the appropriate ioctls.  Intel integrated
 * 	devices need no configuration here.
 *
 * Returns:
 * 	0	success
 * 	-1	a configure ioctl failed or the arc is unknown
 */
int
lyr_config_devices(agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			rc = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD830:
	case ARC_IGD810:
		/* Nothing to configure for integrated graphics */
		break;
	case ARC_INTELAGP:
	{
		hdl = agp_regdev->agprd_targethdl;
		rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
		    0, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
	{
		/*
		 * BIOS always shadow registers such like Aperture Base
		 * register, Aperture Size Register from the AGP bridge
		 * to the AMD64 CPU host bridge. If future BIOSes are broken
		 * in this regard, we may need to shadow these registers
		 * in driver.
		 */

		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list;
		    gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_CONFIGURE,
			    0, FKIOCTL, kcred, 0)) {
				rc = -1;
				break;
			}
		}
		break;
	}
	default:
		rc = -1;
	}

	if (rc)
		return (-1);

	return (0);
}
1081 
/*
 * lyr_unconfig_devices()
 *
 * Description:
 * 	Undo lyr_config_devices(): issue the unconfigure ioctl to the
 * 	appropriate device(s) for the detected architecture.
 *
 * Returns:
 * 	0	success
 * 	-1	an unconfigure ioctl failed or the arc is unknown
 */
int
lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			rc = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD830:
	case ARC_IGD810:
	{
		/* Integrated graphics: unconfigure the master device */
		hdl = agp_regdev->agprd_masterhdl;
		rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_INTELAGP:
	{
		hdl = agp_regdev->agprd_targethdl;
		rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
		    0, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
	{
		/* Unconfigure every on-cpu gart device */
		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list; gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_UNCONFIG,
			    0, FKIOCTL, kcred, 0)) {
				rc = -1;
				break;
			}
		}
		break;
	}
	default:
		rc = -1;
	}

	if (rc)
		return (-1);

	return (0);
}
1128 
1129 /*
1130  * lyr_flush_gart_cache()
1131  *
1132  * Description:
1133  * 	This function flushes the GART translation look-aside buffer. All
1134  * 	GART translation caches will be flushed after this operation.
1135  *
1136  * Arguments:
1137  *	agp_regdev	AGP devices struct pointer
1138  */
1139 void
1140 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
1141 {
1142 	amd64_gart_dev_list_t	*gart_list;
1143 	ldi_handle_t		hdl;
1144 
1145 	ASSERT(agp_regdev);
1146 	if ((agp_regdev->agprd_arctype == ARC_AMD64AGP) ||
1147 	    (agp_regdev->agprd_arctype == ARC_AMD64NOAGP)) {
1148 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1149 		    gart_list; gart_list = gart_list->next) {
1150 			hdl = gart_list->gart_devhdl;
1151 			(void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
1152 			    0, FKIOCTL, kcred, 0);
1153 		}
1154 	} else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
1155 		hdl = agp_regdev->agprd_targethdl;
1156 		(void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
1157 		    FKIOCTL, kcred, 0);
1158 	}
1159 }
1160 
1161 /*
1162  * get_max_pages()
1163  *
1164  * Description:
 * 	This function computes the total pages allowed for the agp aperture
 *	based on the amount of physical pages.
 * 	The algorithm is: compare the aperture size with 1/4 of total
 *	physical pages, and use the smaller one as the max available
 * 	pages.
1170  *
1171  * Arguments:
1172  * 	aper_size	system agp aperture size (in MB)
1173  *
1174  * Returns:
1175  * 	The max possible number of agp memory pages available to users
1176  */
1177 static uint32_t
1178 get_max_pages(uint32_t aper_size)
1179 {
1180 	uint32_t i, j;
1181 
1182 	ASSERT(aper_size <= MAXAPERMEGAS);
1183 
1184 	i = AGP_MB2PAGES(aper_size);
1185 	j = (physmem >> 2);
1186 
1187 	return ((i < j) ? i : j);
1188 }
1189 
1190 /*
1191  * agp_fill_empty_keyent()
1192  *
1193  * Description:
1194  * 	This function finds a empty key table slot and
1195  * 	fills it with a new entity.
1196  *
1197  * Arguments:
 * 	softstate	driver soft state pointer
1199  * 	entryp		new entity data pointer
1200  *
1201  * Returns:
1202  * 	NULL	no key table slot available
1203  * 	entryp	the new entity slot pointer
1204  */
1205 static keytable_ent_t *
1206 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1207 {
1208 	int key;
1209 	keytable_ent_t *newentryp;
1210 
1211 	ASSERT(softstate);
1212 	ASSERT(entryp);
1213 	ASSERT(entryp->kte_memhdl);
1214 	ASSERT(entryp->kte_pfnarray);
1215 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1216 
1217 	for (key = 0; key < AGP_MAXKEYS; key++) {
1218 		newentryp = &softstate->asoft_table[key];
1219 		if (newentryp->kte_memhdl == NULL) {
1220 			break;
1221 		}
1222 	}
1223 
1224 	if (key >= AGP_MAXKEYS) {
1225 		AGPDB_PRINT2((CE_WARN,
1226 		    "agp_fill_empty_keyent: key table exhausted"));
1227 		return (NULL);
1228 	}
1229 
1230 	ASSERT(newentryp->kte_pfnarray == NULL);
1231 	bcopy(entryp, newentryp, sizeof (keytable_ent_t));
1232 	newentryp->kte_key = key;
1233 
1234 	return (newentryp);
1235 }
1236 
1237 /*
1238  * agp_find_bound_keyent()
1239  *
1240  * Description:
1241  * 	This function finds the key table entity by agp aperture page offset.
1242  * 	Every keytable entity will have an agp aperture range after the binding
1243  *	operation.
1244  *
1245  * Arguments:
 * 	softstate	driver soft state pointer
1247  * 	pg_offset	agp aperture page offset
1248  *
1249  * Returns:
1250  * 	NULL		no such keytable entity
1251  * 	pointer		key table entity pointer found
1252  */
1253 static keytable_ent_t *
1254 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
1255 {
1256 	int keycount;
1257 	keytable_ent_t *entryp;
1258 
1259 	ASSERT(softstate);
1260 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1261 
1262 	for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
1263 		entryp = &softstate->asoft_table[keycount];
1264 		if (entryp->kte_bound == 0) {
1265 			continue;
1266 		}
1267 
1268 		if (pg_offset < entryp->kte_pgoff)
1269 			continue;
1270 		if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
1271 			continue;
1272 
1273 		ASSERT(entryp->kte_memhdl);
1274 		ASSERT(entryp->kte_pfnarray);
1275 
1276 		return (entryp);
1277 	}
1278 
1279 	return (NULL);
1280 }
1281 
1282 /*
1283  * agp_check_off()
1284  *
1285  * Description:
1286  * 	This function checks whether an AGP aperture range to be bound
1287  *	overlaps with AGP offset already bound.
1288  *
1289  * Arguments:
1290  *	entryp		key table start entry pointer
1291  * 	pg_start	AGP range start page offset
1292  *	pg_num		pages number to be bound
1293  *
1294  * Returns:
1295  *	0		Does not overlap
1296  *	-1		Overlaps
1297  */
1298 
1299 static int
1300 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
1301 {
1302 	int key;
1303 	uint64_t pg_end;
1304 	uint64_t kpg_end;
1305 
1306 	ASSERT(entryp);
1307 
1308 	pg_end = pg_start + pg_num;
1309 	for (key = 0; key < AGP_MAXKEYS; key++) {
1310 		if (!entryp[key].kte_bound)
1311 			continue;
1312 
1313 		kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
1314 		if (!((pg_end <= entryp[key].kte_pgoff) ||
1315 		    (pg_start >= kpg_end)))
1316 			break;
1317 	}
1318 
1319 	if (key == AGP_MAXKEYS)
1320 		return (0);
1321 	else
1322 		return (-1);
1323 }
1324 
1325 static int
1326 is_controlling_proc(agpgart_softstate_t *st)
1327 {
1328 	ASSERT(st);
1329 
1330 	if (!st->asoft_acquired) {
1331 		AGPDB_PRINT2((CE_WARN,
1332 		    "ioctl_agpgart_setup: gart not acquired"));
1333 		return (-1);
1334 	}
1335 	if (st->asoft_curpid != ddi_get_pid()) {
1336 		AGPDB_PRINT2((CE_WARN,
1337 		    "ioctl_agpgart_release: not  controlling process"));
1338 		return (-1);
1339 	}
1340 
1341 	return (0);
1342 }
1343 
1344 static void release_control(agpgart_softstate_t *st)
1345 {
1346 	st->asoft_curpid = 0;
1347 	st->asoft_acquired = 0;
1348 }
1349 
1350 static void acquire_control(agpgart_softstate_t *st)
1351 {
1352 	st->asoft_curpid = ddi_get_pid();
1353 	st->asoft_acquired = 1;
1354 }
1355 
1356 /*
1357  * agp_remove_from_gart()
1358  *
1359  * Description:
1360  * 	This function fills the gart table entries by a given page
1361  * 	frame number array and setup the agp aperture page to physical
1362  * 	memory page translation.
1363  * Arguments:
1364  * 	pg_offset	Starting aperture page to be bound
1365  * 	entries		the number of pages to be bound
1366  * 	acc_hdl		GART table dma memory acc handle
1367  * 	tablep		GART table kernel virtual address
1368  */
1369 static void
1370 agp_remove_from_gart(
1371     uint32_t pg_offset,
1372     uint32_t entries,
1373     ddi_dma_handle_t dma_hdl,
1374     uint32_t *tablep)
1375 {
1376 	uint32_t items = 0;
1377 	uint32_t *entryp;
1378 
1379 	entryp = tablep + pg_offset;
1380 	while (items < entries) {
1381 		*(entryp + items) = 0;
1382 		items++;
1383 	}
1384 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
1385 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
1386 }
1387 
1388 /*
1389  * agp_unbind_key()
1390  *
1391  * Description:
1392  * 	This function unbinds AGP memory from the gart table. It will clear
1393  * 	all the gart entries related to this agp memory.
1394  *
1395  * Arguments:
1396  * 	softstate		driver soft state pointer
1397  * 	entryp			key table entity pointer
1398  *
1399  * Returns:
1400  * 	EINVAL		invalid key table entity pointer
1401  * 	0		success
1402  *
1403  */
static int
agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
{
	int retval = 0;

	ASSERT(entryp);
	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));

	/* Unbinding memory that was never bound is a caller error */
	if (!entryp->kte_bound) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_unbind_key: key = 0x%x, not bound",
		    entryp->kte_key));
		return (EINVAL);
	}
	/* Refuse while the memory is still exported to user mappings */
	if (entryp->kte_refcnt) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_unbind_key: memory is exported to users"));
		return (EINVAL);
	}

	/* The bound range must lie entirely inside the aperture */
	ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
	    AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
	ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));

	switch (softstate->asoft_devreg.agprd_arctype) {
	case ARC_IGD810:
	case ARC_IGD830:
		/* IGD: ask the master driver to clear its GTT range */
		retval = lyr_i8xx_remove_from_gtt(
		    entryp->kte_pgoff, entryp->kte_pages,
		    &softstate->asoft_devreg);
		if (retval) {
			AGPDB_PRINT2((CE_WARN,
			    "agp_unbind_key: Key = 0x%x, clear table error",
			    entryp->kte_key));
			return (EIO);
		}
		break;
	case ARC_INTELAGP:
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
		/* GART is driver-owned dma memory; clear it directly */
		agp_remove_from_gart(entryp->kte_pgoff,
		    entryp->kte_pages,
		    softstate->gart_dma_handle,
		    (uint32_t *)softstate->gart_vbase);
		/* Flush GTLB table */
		lyr_flush_gart_cache(&softstate->asoft_devreg);

		break;
	}

	/* Mark the entry free for a future bind */
	entryp->kte_bound = 0;

	return (0);
}
1458 
1459 /*
1460  * agp_dealloc_kmem()
1461  *
1462  * Description:
1463  * 	This function deallocates dma memory resources for userland
1464  * 	applications.
1465  *
1466  * Arguments:
1467  * 	entryp		keytable entity pointer
1468  */
1469 static void
1470 agp_dealloc_kmem(keytable_ent_t *entryp)
1471 {
1472 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1473 	entryp->kte_pfnarray = NULL;
1474 
1475 	(void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
1476 	KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
1477 	ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
1478 	KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
1479 	KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
1480 	KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
1481 
1482 	ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
1483 	KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
1484 
1485 	kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
1486 	entryp->kte_memhdl = NULL;
1487 }
1488 
1489 /*
1490  * agp_dealloc_pmem()
1491  *
1492  * Description:
1493  * 	This function deallocates memory resource for direct mapping to
1494  * 	userland applications.
1495  *
1496  * Arguments:
1497  * 	entryp		key table entity pointer
1498  *
1499  */
1500 static void
1501 agp_dealloc_pmem(keytable_ent_t *entryp)
1502 {
1503 	devmap_pmem_free(PMEMP(entryp->kte_memhdl)->pmem_cookie);
1504 	PMEMP(entryp->kte_memhdl)->pmem_cookie = NULL;
1505 	kmem_free(entryp->kte_memhdl, sizeof (agp_pmem_handle_t));
1506 	entryp->kte_memhdl = NULL;
1507 
1508 	/* free the page frame number array */
1509 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1510 	entryp->kte_pfnarray = NULL;
1511 }
1512 
1513 /*
1514  * agp_dealloc_mem()
1515  *
1516  * Description:
1517  * 	This function deallocates physical memory resources allocated for
1518  *	userland applications.
1519  *
1520  * Arguments:
1521  * 	st		driver soft state pointer
1522  * 	entryp		key table entity pointer
1523  *
1524  * Returns:
1525  * 	-1		not a valid memory type or the memory is mapped by
1526  * 			user area applications
1527  * 	0		success
1528  */
1529 static int
1530 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t	*entryp)
1531 {
1532 
1533 	ASSERT(entryp);
1534 	ASSERT(st);
1535 	ASSERT(entryp->kte_memhdl);
1536 	ASSERT(mutex_owned(&st->asoft_instmutex));
1537 
1538 	/* auto unbind here */
1539 	if (entryp->kte_bound && !entryp->kte_refcnt) {
1540 		AGPDB_PRINT2((CE_WARN,
1541 		    "agp_dealloc_mem: key=0x%x, auto unbind",
1542 		    entryp->kte_key));
1543 
1544 		/*
1545 		 * agp_dealloc_mem may be called indirectly by agp_detach.
1546 		 * In the agp_detach function, agpgart_close is already
1547 		 * called which will free the gart table. agp_unbind_key
1548 		 * will panic if no valid gart table exists. So test if
1549 		 * gart table exsits here.
1550 		 */
1551 		if (st->asoft_opened)
1552 			(void) agp_unbind_key(st, entryp);
1553 	}
1554 	if (entryp->kte_refcnt) {
1555 		AGPDB_PRINT2((CE_WARN,
1556 		    "agp_dealloc_pmem: memory is exported to users"));
1557 		return (-1);
1558 	}
1559 
1560 	switch (entryp->kte_type) {
1561 	case AGP_NORMAL:
1562 		agp_dealloc_pmem(entryp);
1563 		break;
1564 	case AGP_PHYSICAL:
1565 		agp_dealloc_kmem(entryp);
1566 		break;
1567 	default:
1568 		return (-1);
1569 	}
1570 
1571 	return (0);
1572 }
1573 
1574 /*
1575  * agp_del_allkeys()
1576  *
1577  * Description:
1578  * 	This function calls agp_dealloc_mem to release all the agp memory
1579  *	resource allocated.
1580  *
1581  * Arguments:
 * 	softstate	driver soft state pointer
1583  * Returns:
1584  * 	-1		can not free all agp memory
1585  * 	0		success
1586  *
1587  */
1588 static int
1589 agp_del_allkeys(agpgart_softstate_t *softstate)
1590 {
1591 	int key;
1592 	int ret = 0;
1593 
1594 	ASSERT(softstate);
1595 	for (key = 0; key < AGP_MAXKEYS; key++) {
1596 		if (softstate->asoft_table[key].kte_memhdl != NULL) {
1597 			/*
1598 			 * Check if we can free agp memory now.
1599 			 * If agp memory is exported to user
1600 			 * applications, agp_dealloc_mem will fail.
1601 			 */
1602 			if (agp_dealloc_mem(softstate,
1603 			    &softstate->asoft_table[key]))
1604 				ret = -1;
1605 		}
1606 	}
1607 
1608 	return (ret);
1609 }
1610 
1611 /*
1612  * pfn2gartentry()
1613  *
1614  * Description:
1615  *	This function converts a physical address to GART entry.
1616  *	For AMD64, hardware only support addresses below 40bits,
1617  *	about 1024G physical address, so the largest pfn
1618  *	number is below 28 bits. Please refer to GART and GTT entry
1619  *	format table in agpdefs.h for entry format. Intel IGD only
1620  * 	only supports GTT entry below 1G. Intel AGP only supports
1621  * 	GART entry below 4G.
1622  *
1623  * Arguments:
1624  * 	arc_type		system agp arc type
1625  * 	pfn			page frame number
1626  * 	itemv			the entry item to be returned
1627  * Returns:
1628  * 	-1			not a invalid page frame
1629  * 	0			conversion success
1630  */
1631 static int
1632 pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
1633 {
1634 	uint64_t paddr;
1635 
1636 	paddr = pfn<<AGP_PAGE_SHIFT;
1637 
1638 	switch (arc_type) {
1639 	case ARC_INTELAGP:
1640 	{
1641 		/* Only support 32-bit hardware address */
1642 		if ((paddr & ~AGP_INTEL_POINTER_MASK) != 0) {
1643 			AGPDB_PRINT2((CE_WARN,
1644 			    "INTEL AGP Hardware only support 32 bits"));
1645 			return (-1);
1646 		}
1647 		*itemv =  (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;
1648 
1649 		break;
1650 	}
1651 	case ARC_AMD64NOAGP:
1652 	case ARC_AMD64AGP:
1653 	{
1654 		uint32_t value1, value2;
1655 		/* Physaddr should not exceed 40-bit */
1656 		if ((paddr & ~AMD64_POINTER_MASK) != 0) {
1657 			AGPDB_PRINT2((CE_WARN,
1658 			    "AMD64 GART hardware only supoort 40 bits"));
1659 			return (-1);
1660 		}
1661 		value1 = (uint32_t)pfn >> 20;
1662 		value1 <<= 4;
1663 		value2 = (uint32_t)pfn << 12;
1664 
1665 		*itemv = value1 | value2 | AMD64_ENTRY_VALID;
1666 		break;
1667 	}
1668 	case ARC_IGD810:
1669 		if ((paddr & ~I810_POINTER_MASK) != 0) {
1670 			AGPDB_PRINT2((CE_WARN,
1671 			    "Intel i810 only support 30 bits"));
1672 			return (-1);
1673 		}
1674 		break;
1675 
1676 	case ARC_IGD830:
1677 		if ((paddr & ~GTT_POINTER_MASK) != 0) {
1678 			AGPDB_PRINT2((CE_WARN,
1679 			    "Intel IGD only support 32 bits"));
1680 			return (-1);
1681 		}
1682 		break;
1683 	default:
1684 		AGPDB_PRINT2((CE_WARN,
1685 		    "pfn2gartentry: arc type = %d, not support", arc_type));
1686 		return (-1);
1687 	}
1688 	return (0);
1689 }
1690 
1691 /*
1692  * Check allocated physical pages validity, only called in DEBUG
1693  * mode.
1694  */
1695 static int
1696 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
1697 {
1698 	int count;
1699 	uint32_t ret;
1700 
1701 	for (count = 0; count < items; count++) {
1702 		if (pfn2gartentry(arc_type, pfnarray[count], &ret))
1703 			break;
1704 	}
1705 	if (count < items)
1706 		return (-1);
1707 	else
1708 		return (0);
1709 }
1710 
1711 /*
1712  * kmem_getpfns()
1713  *
1714  * Description:
1715  * 	This function gets page frame numbers from dma handle.
1716  *
1717  * Arguments:
 * 	dma_handle		dma handle allocated by ddi_dma_alloc_handle
 * 	dma_cookiep		dma cookie pointer
1720  * 	cookies_num		cookies number
1721  * 	pfnarray		array to store page frames
1722  *
1723  * Returns:
1724  *	0		success
1725  */
1726 static int
1727 kmem_getpfns(
1728     ddi_dma_handle_t dma_handle,
1729     ddi_dma_cookie_t *dma_cookiep,
1730     int cookies_num,
1731     pfn_t *pfnarray)
1732 {
1733 	int	num_cookies;
1734 	int	index = 0;
1735 
1736 	num_cookies = cookies_num;
1737 
1738 	while (num_cookies > 0) {
1739 		uint64_t ck_startaddr, ck_length, ck_end;
1740 		ck_startaddr = dma_cookiep->dmac_address;
1741 		ck_length = dma_cookiep->dmac_size;
1742 
1743 		ck_end = ck_startaddr + ck_length;
1744 		while (ck_startaddr < ck_end) {
1745 			pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
1746 			ck_startaddr += AGP_PAGE_SIZE;
1747 			index++;
1748 		}
1749 
1750 		num_cookies--;
1751 		if (num_cookies > 0) {
1752 			ddi_dma_nextcookie(dma_handle, dma_cookiep);
1753 		}
1754 	}
1755 
1756 	return (0);
1757 }
1758 
1759 static int
1760 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
1761 {
1762 	switch (softstate->asoft_devreg.agprd_arctype) {
1763 	case ARC_IGD810:
1764 	case ARC_IGD830:
1765 		info->agpi_version.agpv_major = 0;
1766 		info->agpi_version.agpv_minor = 0;
1767 		info->agpi_devid = softstate->asoft_info.agpki_mdevid;
1768 		info->agpi_mode = 0;
1769 		break;
1770 	case ARC_INTELAGP:
1771 	case ARC_AMD64AGP:
1772 		info->agpi_version = softstate->asoft_info.agpki_tver;
1773 		info->agpi_devid = softstate->asoft_info.agpki_tdevid;
1774 		info->agpi_mode = softstate->asoft_info.agpki_tstatus;
1775 		break;
1776 	case ARC_AMD64NOAGP:
1777 		break;
1778 	default:
1779 		AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC"));
1780 		return (-1);
1781 	}
1782 	/*
1783 	 * 64bit->32bit conversion possible
1784 	 */
1785 	info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
1786 	info->agpi_apersize = softstate->asoft_info.agpki_apersize;
1787 	info->agpi_pgtotal = softstate->asoft_pgtotal;
1788 	info->agpi_pgsystem = info->agpi_pgtotal;
1789 	info->agpi_pgused = softstate->asoft_pgused;
1790 
1791 	return (0);
1792 }
1793 
1794 static uint32_t
1795 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1796 {
1797 	uint32_t cmd;
1798 	int rq, sba, over4g, fw, rate;
1799 
1800 	/*
1801 	 * tstatus: target device status
1802 	 * mstatus: master device status
1803 	 * mode: the agp mode to be sent
1804 	 */
1805 
1806 	/*
1807 	 * RQ - Request Queue size
1808 	 * set RQ to the min of mode and tstatus
1809 	 * if mode set a RQ larger than hardware can support,
1810 	 * use the max RQ which hardware can support.
1811 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1812 	 * Corelogic will enqueue agp transaction
1813 	 */
1814 	rq = mode & AGPSTAT_RQ_MASK;
1815 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1816 		rq = tstatus & AGPSTAT_RQ_MASK;
1817 
1818 	/*
1819 	 * SBA - Sideband Addressing
1820 	 *
1821 	 * Sideband Addressing provides an additional bus to pass requests
1822 	 * (address and command) to the target from the master.
1823 	 *
1824 	 * set SBA if all three support it
1825 	 */
1826 	sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
1827 	    & (mode & AGPSTAT_SBA);
1828 
1829 	/* set OVER4G  if all three support it */
1830 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1831 	    & (mode & AGPSTAT_OVER4G);
1832 
1833 	/*
1834 	 * FW - fast write
1835 	 *
1836 	 * acceleration of memory write transactions from the corelogic to the
1837 	 * A.G.P. master device acting like a PCI target.
1838 	 *
1839 	 * set FW if all three support it
1840 	 */
1841 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1842 	    & (mode & AGPSTAT_FW);
1843 
1844 	/*
1845 	 * figure out the max rate
1846 	 * AGP v2 support: 4X, 2X, 1X speed
1847 	 * status bit		meaning
1848 	 * ---------------------------------------------
1849 	 * 7:3			others
1850 	 * 3			0 stand for V2 support
1851 	 * 0:2			001:1X, 010:2X, 100:4X
1852 	 * ----------------------------------------------
1853 	 */
1854 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1855 	    & (mode & AGPSTAT_RATE_MASK);
1856 	if (rate & AGP2_RATE_4X)
1857 		rate = AGP2_RATE_4X;
1858 	else if (rate & AGP2_RATE_2X)
1859 		rate = AGP2_RATE_2X;
1860 	else
1861 		rate = AGP2_RATE_1X;
1862 
1863 	cmd = rq | sba | over4g | fw | rate;
1864 	/* enable agp mode */
1865 	cmd |= AGPCMD_AGPEN;
1866 
1867 	return (cmd);
1868 }
1869 
1870 static uint32_t
1871 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1872 {
1873 	uint32_t cmd = 0;
1874 	uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
1875 
1876 	/*
1877 	 * tstatus: target device status
1878 	 * mstatus: master device status
1879 	 * mode: the agp mode to be set
1880 	 */
1881 
1882 	/*
1883 	 * RQ - Request Queue size
1884 	 * Set RQ to the min of mode and tstatus
1885 	 * If mode set a RQ larger than hardware can support,
1886 	 * use the max RQ which hardware can support.
1887 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1888 	 * Corelogic will enqueue agp transaction;
1889 	 */
1890 	rq = mode & AGPSTAT_RQ_MASK;
1891 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1892 		rq = tstatus & AGPSTAT_RQ_MASK;
1893 
1894 	/*
1895 	 * ARQSZ - Asynchronous Request Queue size
1896 	 * Set the value equal to tstatus.
1897 	 * Don't allow the mode register to override values
1898 	 */
1899 	arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
1900 
1901 	/*
1902 	 * CAL - Calibration cycle
1903 	 * Set to the min of tstatus and mstatus
1904 	 * Don't allow override by mode register
1905 	 */
1906 	cal = tstatus & AGPSTAT_CAL_MASK;
1907 	if ((mstatus & AGPSTAT_CAL_MASK) < cal)
1908 		cal = mstatus & AGPSTAT_CAL_MASK;
1909 
1910 	/*
1911 	 * SBA - Sideband Addressing
1912 	 *
1913 	 * Sideband Addressing provides an additional bus to pass requests
1914 	 * (address and command) to the target from the master.
1915 	 *
1916 	 * SBA in agp v3.0 must be set
1917 	 */
1918 	sba = AGPCMD_SBAEN;
1919 
1920 	/* GART64B is not set since no hardware supports it now */
1921 
1922 	/* Set OVER4G if all three support it */
1923 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1924 	    & (mode & AGPSTAT_OVER4G);
1925 
1926 	/*
1927 	 * FW - fast write
1928 	 *
1929 	 * Acceleration of memory write transactions from the corelogic to the
1930 	 * A.G.P. master device acting like a PCI target.
1931 	 *
1932 	 * Always set FW in AGP 3.0
1933 	 */
1934 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1935 	    & (mode & AGPSTAT_FW);
1936 
1937 	/*
1938 	 * Figure out the max rate
1939 	 *
1940 	 * AGP v3 support: 8X, 4X speed
1941 	 *
1942 	 * status bit		meaning
1943 	 * ---------------------------------------------
1944 	 * 7:3			others
1945 	 * 3			1 stand for V3 support
1946 	 * 0:2			001:4X, 010:8X, 011:4X,8X
1947 	 * ----------------------------------------------
1948 	 */
1949 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1950 	    & (mode & AGPSTAT_RATE_MASK);
1951 	if (rate & AGP3_RATE_8X)
1952 		rate = AGP3_RATE_8X;
1953 	else
1954 		rate = AGP3_RATE_4X;
1955 
1956 	cmd = rq | arqsz | cal | sba | over4g | fw | rate;
1957 	/* Enable AGP mode */
1958 	cmd |= AGPCMD_AGPEN;
1959 
1960 	return (cmd);
1961 }
1962 
1963 static int
1964 agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
1965 {
1966 	uint32_t tstatus, mstatus;
1967 	uint32_t agp_mode;
1968 
1969 	tstatus = softstate->asoft_info.agpki_tstatus;
1970 	mstatus = softstate->asoft_info.agpki_mstatus;
1971 
1972 	/*
1973 	 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0
1974 	 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
1975 	 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in
1976 	 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register,
1977 	 * we can get which mode it is working at. The working mode of
1978 	 * AGP master and AGP target must be consistent. That is, both
1979 	 * of them must work on AGP 3.0 mode or AGP 2.0 mode.
1980 	 */
1981 	if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
1982 	    (tstatus & AGPSTAT_MODE3)) {
1983 		/* Master device should be 3.0 mode, too */
1984 		if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
1985 		    ((mstatus & AGPSTAT_MODE3) == 0))
1986 			return (EIO);
1987 
1988 		agp_mode = agp_v3_setup(tstatus, mstatus, mode);
1989 		/* Write to the AGPCMD register of target and master devices */
1990 		if (lyr_set_agp_cmd(agp_mode,
1991 		    &softstate->asoft_devreg))
1992 			return (EIO);
1993 
1994 		softstate->asoft_mode = agp_mode;
1995 
1996 		return (0);
1997 	}
1998 
1999 	/*
2000 	 * If agp taget device doesn't work in AGP 3.0 mode,
2001 	 * it must work in AGP 2.0 mode. And make sure
2002 	 * master device work in AGP 2.0 mode too
2003 	 */
2004 	if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
2005 	    (mstatus & AGPSTAT_MODE3))
2006 		return (EIO);
2007 
2008 	agp_mode = agp_v2_setup(tstatus, mstatus, mode);
2009 	if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
2010 		return (EIO);
2011 	softstate->asoft_mode = agp_mode;
2012 
2013 	return (0);
2014 }
2015 
2016 /*
2017  * agp_alloc_pmem()
2018  *
2019  * Description:
2020  * 	This function allocates physical memory for direct mapping to userland
2021  * 	applications.
2022  *
2023  * Arguments:
 * 	softstate	driver soft state pointer
2025  * 	length		memory size
 * 	type		AGP_NORMAL: normal agp memory, AGP_PHYSICAL: special
 *			memory type for intel i810 IGD
2028  *
2029  * Returns:
2030  * 	entryp		new key table entity pointer
2031  * 	NULL		no key table slot available
2032  */
static keytable_ent_t *
agp_alloc_pmem(agpgart_softstate_t *softstate, size_t length, int type)
{
	keytable_ent_t	keyentry;
	keytable_ent_t	*entryp;

	ASSERT(AGP_ALIGNED(length));
	bzero(&keyentry, sizeof (keytable_ent_t));

	keyentry.kte_pages = AGP_BYTES2PAGES(length);
	keyentry.kte_type = type;

	/* Handle structure that owns the pmem cookie for this allocation */
	keyentry.kte_memhdl =
	    (agp_pmem_handle_t *)kmem_zalloc(sizeof (agp_pmem_handle_t),
	    KM_SLEEP);

	/* Allocate pinned physical pages suitable for devmap export */
	if (devmap_pmem_alloc(length,
	    PMEM_SLEEP,
	    &PMEMP(keyentry.kte_memhdl)->pmem_cookie) != DDI_SUCCESS)
		goto err1;

	/* One pfn per aperture page, filled in from the pmem cookie */
	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
	    keyentry.kte_pages, KM_SLEEP);

	if (devmap_pmem_getpfns(
	    PMEMP(keyentry.kte_memhdl)->pmem_cookie,
	    0, keyentry.kte_pages, keyentry.kte_pfnarray) != DDI_SUCCESS) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_alloc_pmem: devmap_map_getpfns failed"));
		goto err2;
	}
	/* DEBUG only: verify every pfn is encodable as a GART entry */
	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
	    keyentry.kte_pfnarray, keyentry.kte_pages));
	entryp = agp_fill_empty_keyent(softstate, &keyentry);

	if (!entryp) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_alloc_pmem: agp_fill_empty_keyent error"));
		goto err2;
	}
	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));

	return (entryp);

	/* Error unwind, in reverse order of acquisition */
err2:
	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
	keyentry.kte_pfnarray = NULL;
	devmap_pmem_free(PMEMP(keyentry.kte_memhdl)->pmem_cookie);
	PMEMP(keyentry.kte_memhdl)->pmem_cookie = NULL;
err1:
	kmem_free(keyentry.kte_memhdl, sizeof (agp_pmem_handle_t));
	keyentry.kte_memhdl = NULL;

	return (NULL);

}
2089 
2090 /*
2091  * agp_alloc_kmem()
2092  *
2093  * Description:
2094  * 	This function allocates physical memory for userland applications
2095  * 	by ddi interfaces. This function can only be called to allocate
 *	small physical contiguous pages, usually tens of kilobytes.
2097  *
2098  * Arguments:
 * 	softstate	driver soft state pointer
2100  * 	length		memory size
2101  *
2102  * Returns:
2103  * 	entryp		new keytable entity pointer
2104  * 	NULL		no keytable slot available or no physical
2105  *			memory available
2106  */
2107 static keytable_ent_t *
2108 agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length)
2109 {
2110 	keytable_ent_t	keyentry;
2111 	keytable_ent_t	*entryp;
2112 	int		ret;
2113 
2114 	ASSERT(AGP_ALIGNED(length));
2115 
2116 	bzero(&keyentry, sizeof (keytable_ent_t));
2117 
2118 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2119 	keyentry.kte_type = AGP_PHYSICAL;
2120 
2121 	/*
2122 	 * Set dma_attr_sgllen to assure contiguous physical pages
2123 	 */
2124 	agpgart_dma_attr.dma_attr_sgllen = 1;
2125 
2126 	/* 4k size pages */
2127 	keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);
2128 
2129 	if (ddi_dma_alloc_handle(softstate->asoft_dip,
2130 	    &agpgart_dma_attr,
2131 	    DDI_DMA_SLEEP, NULL,
2132 	    &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
2133 		AGPDB_PRINT2((CE_WARN,
2134 		    "agp_alloc_kmem: ddi_dma_allco_hanlde error"));
2135 		goto err4;
2136 	}
2137 
2138 	if ((ret = ddi_dma_mem_alloc(
2139 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2140 	    length,
2141 	    &gart_dev_acc_attr,
2142 	    DDI_DMA_CONSISTENT,
2143 	    DDI_DMA_SLEEP, NULL,
2144 	    &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2145 	    &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
2146 	    &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
2147 		AGPDB_PRINT2((CE_WARN,
2148 		    "agp_alloc_kmem: ddi_dma_mem_alloc error"));
2149 
2150 		goto err3;
2151 	}
2152 
2153 	ret = ddi_dma_addr_bind_handle(
2154 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2155 	    NULL,
2156 	    KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2157 	    length,
2158 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2159 	    DDI_DMA_SLEEP,
2160 	    NULL,
2161 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2162 	    &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);
2163 
2164 	/*
2165 	 * Even dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more
2166 	 * than one cookie, we check this in the if statement.
2167 	 */
2168 
2169 	if ((ret != DDI_DMA_MAPPED) ||
2170 	    (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1)) {
2171 		AGPDB_PRINT2((CE_WARN,
2172 		    "agp_alloc_kmem: can not alloc physical memory properly"));
2173 		goto err2;
2174 	}
2175 
2176 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2177 	    keyentry.kte_pages, KM_SLEEP);
2178 
2179 	if (kmem_getpfns(
2180 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2181 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2182 	    KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
2183 	    keyentry.kte_pfnarray)) {
2184 		AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
2185 		goto err1;
2186 	}
2187 
2188 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2189 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2190 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2191 	if (!entryp) {
2192 		AGPDB_PRINT2((CE_WARN,
2193 		    "agp_alloc_kmem: agp_fill_empty_keyent error"));
2194 
2195 		goto err1;
2196 	}
2197 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2198 
2199 	return (entryp);
2200 
2201 err1:
2202 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2203 	keyentry.kte_pfnarray = NULL;
2204 	(void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
2205 	KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
2206 err2:
2207 	ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
2208 	KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
2209 	KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
2210 	KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
2211 err3:
2212 	ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
2213 	KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
2214 err4:
2215 	kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
2216 	keyentry.kte_memhdl = NULL;
2217 	return (NULL);
2218 
2219 }
2220 
2221 /*
2222  * agp_alloc_mem()
2223  *
2224  * Description:
2225  * 	This function allocate physical memory for userland applications,
2226  * 	in order to save kernel virtual space, we use the direct mapping
2227  * 	memory interface if it is available.
2228  *
2229  * Arguments:
2230  * 	st		driver soft state pointer
2231  * 	length		memory size
2232  * 	type		AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
2233  *			memory type for intel i810 IGD
2234  *
2235  * Returns:
2236  * 	NULL 	Invalid memory type or can not allocate memory
2237  * 	Keytable entry pointer returned by agp_alloc_kmem or agp_alloc_pmem
2238  */
2239 static keytable_ent_t *
2240 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
2241 {
2242 
2243 	/*
2244 	 * AGP_PHYSICAL type require contiguous physical pages exported
2245 	 * to X drivers, like i810 HW cursor, ARGB cursor. the number of
2246 	 * pages needed is usuallysmall and contiguous, 4K, 16K. So we
2247 	 * use DDI interface to allocated such memory. And X use xsvc
2248 	 * drivers to map this memory into its own address space.
2249 	 */
2250 	ASSERT(st);
2251 
2252 	switch (type) {
2253 	case AGP_NORMAL:
2254 		return (agp_alloc_pmem(st, length, type));
2255 	case AGP_PHYSICAL:
2256 		return (agp_alloc_kmem(st, length));
2257 	default:
2258 		return (NULL);
2259 	}
2260 }
2261 
2262 /*
2263  * free_gart_table()
2264  *
2265  * Description:
2266  * 	This function frees the gart table memory allocated by driver.
2267  * 	Must disable gart table before calling this function.
2268  *
2269  * Arguments:
2270  * 	softstate		driver soft state pointer
2271  *
2272  */
2273 static void
2274 free_gart_table(agpgart_softstate_t *st)
2275 {
2276 
2277 	if (st->gart_dma_handle == NULL)
2278 		return;
2279 
2280 	(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2281 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2282 	st->gart_dma_acc_handle = NULL;
2283 	ddi_dma_free_handle(&st->gart_dma_handle);
2284 	st->gart_dma_handle = NULL;
2285 	st->gart_vbase = 0;
2286 	st->gart_size = 0;
2287 }
2288 
2289 /*
2290  * alloc_gart_table()
2291  *
2292  * Description:
2293  * 	This function allocates one physical continuous gart table.
2294  * 	INTEL integrated video device except i810 have their special
2295  * 	video bios; No need to allocate gart table for them.
2296  *
2297  * Arguments:
2298  * 	st		driver soft state pointer
2299  *
2300  * Returns:
2301  * 	0		success
2302  * 	-1		can not allocate gart tabl
2303  */
2304 static int
2305 alloc_gart_table(agpgart_softstate_t *st)
2306 {
2307 	int			num_pages;
2308 	size_t			table_size;
2309 	int			ret = DDI_SUCCESS;
2310 	ddi_dma_cookie_t	cookie;
2311 	uint32_t		num_cookies;
2312 
2313 	num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);
2314 
2315 	/*
2316 	 * Only 40-bit maximum physical memory is supported by today's
2317 	 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
2318 	 * No one supports 64-bit gart entries now, so the size of gart
2319 	 * entries defaults to 32-bit though AGP3.0 specifies the possibility
2320 	 * of 64-bit gart entries.
2321 	 */
2322 
2323 	table_size = num_pages * (sizeof (uint32_t));
2324 
2325 	/*
2326 	 * Only AMD64 can put gart table above 4G, 40 bits at maximum
2327 	 */
2328 	if ((st->asoft_devreg.agprd_arctype == ARC_AMD64AGP) ||
2329 	    (st->asoft_devreg.agprd_arctype == ARC_AMD64NOAGP))
2330 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
2331 	else
2332 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
2333 	/* Allocate physical continuous page frame for gart table */
2334 	if (ret = ddi_dma_alloc_handle(st->asoft_dip,
2335 	    &garttable_dma_attr,
2336 	    DDI_DMA_SLEEP,
2337 	    NULL, &st->gart_dma_handle)) {
2338 		AGPDB_PRINT2((CE_WARN,
2339 		    "alloc_gart_table: ddi_dma_alloc_handle failed"));
2340 		goto err3;
2341 	}
2342 
2343 	if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
2344 	    table_size,
2345 	    &gart_dev_acc_attr,
2346 	    DDI_DMA_CONSISTENT,
2347 	    DDI_DMA_SLEEP, NULL,
2348 	    &st->gart_vbase,
2349 	    &st->gart_size,
2350 	    &st->gart_dma_acc_handle)) {
2351 		AGPDB_PRINT2((CE_WARN,
2352 		    "alloc_gart_table: ddi_dma_mem_alloc failed"));
2353 		goto err2;
2354 
2355 	}
2356 
2357 	ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
2358 	    NULL, st->gart_vbase,
2359 	    table_size,
2360 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2361 	    DDI_DMA_SLEEP, NULL,
2362 	    &cookie,  &num_cookies);
2363 
2364 	st->gart_pbase = cookie.dmac_address;
2365 
2366 	if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
2367 		if (num_cookies > 1)
2368 			(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2369 		AGPDB_PRINT2((CE_WARN,
2370 		    "alloc_gart_table: alloc contiguous phys memory failed"));
2371 		goto err1;
2372 	}
2373 
2374 	return (0);
2375 err1:
2376 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2377 	st->gart_dma_acc_handle = NULL;
2378 err2:
2379 	ddi_dma_free_handle(&st->gart_dma_handle);
2380 	st->gart_dma_handle = NULL;
2381 err3:
2382 	st->gart_pbase = 0;
2383 	st->gart_size = 0;
2384 	st->gart_vbase = 0;
2385 
2386 	return (-1);
2387 }
2388 
2389 /*
2390  * agp_add_to_gart()
2391  *
2392  * Description:
2393  * 	This function fills the gart table entries by a given page frame number
2394  * 	array and set up the agp aperture page to physical memory page
2395  * 	translation.
2396  * Arguments:
2397  * 	type		valid sytem arc types ARC_AMD64AGP, ARC_INTELAGP,
2398  * 			ARC_AMD64AGP
2399  * 	pfnarray	allocated physical page frame number array
2400  * 	pg_offset	agp aperture start page to be bound
2401  * 	entries		the number of pages to be bound
2402  * 	dma_hdl		gart table dma memory handle
2403  * 	tablep		gart table kernel virtual address
2404  * Returns:
2405  * 	-1		failed
2406  * 	0		success
2407  */
2408 static int
2409 agp_add_to_gart(
2410     agp_arc_type_t type,
2411     pfn_t *pfnarray,
2412     uint32_t pg_offset,
2413     uint32_t entries,
2414     ddi_dma_handle_t dma_hdl,
2415     uint32_t *tablep)
2416 {
2417 	int items = 0;
2418 	uint32_t *entryp;
2419 	uint32_t itemv;
2420 
2421 	entryp = tablep + pg_offset;
2422 	while (items < entries) {
2423 		if (pfn2gartentry(type, pfnarray[items], &itemv))
2424 			break;
2425 		*(entryp + items) = itemv;
2426 		items++;
2427 	}
2428 	if (items < entries)
2429 		return (-1);
2430 
2431 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
2432 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
2433 
2434 	return (0);
2435 }
2436 
2437 /*
2438  * agp_bind_key()
2439  *
2440  * Description:
2441  * 	This function will call low level gart table access functions to
2442  * 	set up gart table translation. Also it will do some sanity
2443  * 	checking on key table entry.
2444  *
2445  * Arguments:
2446  * 	softstate		driver soft state pointer
2447  * 	keyent			key table entity pointer to be bound
2448  * 	pg_offset		aperture start page to be bound
2449  * Returns:
2450  * 	EINVAL			not a valid operation
2451  */
2452 static int
2453 agp_bind_key(agpgart_softstate_t *softstate,
2454     keytable_ent_t  *keyent, uint32_t  pg_offset)
2455 {
2456 	uint64_t pg_end;
2457 	int ret = 0;
2458 
2459 	ASSERT(keyent);
2460 	ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
2461 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
2462 
2463 	pg_end = pg_offset + keyent->kte_pages;
2464 
2465 	if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
2466 		AGPDB_PRINT2((CE_WARN,
2467 		    "agp_bind_key: key=0x%x,exceed aper range",
2468 		    keyent->kte_key));
2469 
2470 		return (EINVAL);
2471 	}
2472 
2473 	if (agp_check_off(softstate->asoft_table,
2474 	    pg_offset, keyent->kte_pages)) {
2475 		AGPDB_PRINT2((CE_WARN,
2476 		    "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
2477 		    pg_offset, keyent->kte_pages));
2478 		return (EINVAL);
2479 	}
2480 
2481 	ASSERT(keyent->kte_pfnarray != NULL);
2482 
2483 	switch (softstate->asoft_devreg.agprd_arctype) {
2484 	case ARC_IGD810:
2485 	case ARC_IGD830:
2486 		ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
2487 		    &softstate->asoft_devreg);
2488 		if (ret)
2489 			return (EIO);
2490 		break;
2491 	case ARC_INTELAGP:
2492 	case ARC_AMD64NOAGP:
2493 	case ARC_AMD64AGP:
2494 		ret =  agp_add_to_gart(
2495 		    softstate->asoft_devreg.agprd_arctype,
2496 		    keyent->kte_pfnarray,
2497 		    pg_offset,
2498 		    keyent->kte_pages,
2499 		    softstate->gart_dma_handle,
2500 		    (uint32_t *)softstate->gart_vbase);
2501 		if (ret)
2502 			return (EINVAL);
2503 		/* Flush GTLB table */
2504 		lyr_flush_gart_cache(&softstate->asoft_devreg);
2505 		break;
2506 	default:
2507 		AGPDB_PRINT2((CE_WARN,
2508 		    "agp_bind_key: arc type = 0x%x unsupported",
2509 		    softstate->asoft_devreg.agprd_arctype));
2510 		return (EINVAL);
2511 	}
2512 	return (0);
2513 }
2514 
/*
 * agpgart_attach()
 *
 * Description:
 * 	attach(9E) entry point. Sets up, in order: per-instance soft state,
 * 	the instance mutex, an LDI identifier (used later to open the
 * 	layered agp drivers), kstats, the minor node and the key table.
 * 	On any failure the goto ladder unwinds in reverse order.
 *
 * Returns:
 * 	DDI_SUCCESS / DDI_FAILURE
 */
static int
agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *softstate;

	/* Only plain DDI_ATTACH is supported */
	if (cmd != DDI_ATTACH) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: only attach op supported"));
		return (DDI_FAILURE);
	}
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
	    != DDI_SUCCESS) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: soft state zalloc failed"));
		goto err1;

	}
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
	softstate->asoft_dip = dip;
	/*
	 * Allocate LDI identifier for agpgart driver
	 * Agpgart driver is the kernel consumer
	 */
	if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: LDI indentifier allcation failed"));
		goto err2;
	}

	/* The real arc type is determined at first open (see agpgart_open) */
	softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
	/* Install agp kstat */
	if (agp_init_kstats(softstate)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
		goto err3;
	}
	/*
	 * devfs will create /dev/agpgart
	 * and  /devices/agpgart:agpgart
	 */

	if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
	    AGP_INST2MINOR(instance),
	    DDI_NT_AGP_PSEUDO, 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: Can not create minor node"));
		goto err4;
	}

	/* Key table tracking all userland allocations for this instance */
	softstate->asoft_table = kmem_zalloc(
	    AGP_MAXKEYS * (sizeof (keytable_ent_t)),
	    KM_SLEEP);

	return (DDI_SUCCESS);
	/* Unwind in reverse order of setup */
err4:
	agp_fini_kstats(softstate);
err3:
	ldi_ident_release(softstate->asoft_li);
err2:
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
err1:
	return (DDI_FAILURE);
}
2581 
2582 static int
2583 agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2584 {
2585 	int instance;
2586 	agpgart_softstate_t *st;
2587 
2588 	instance = ddi_get_instance(dip);
2589 
2590 	st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2591 
2592 	if (cmd != DDI_DETACH)
2593 		return (DDI_FAILURE);
2594 
2595 	/*
2596 	 * Caller should free all the memory allocated explicitly.
2597 	 * We release the memory allocated by caller which is not
2598 	 * properly freed. mutex_enter here make sure assertion on
2599 	 * softstate mutex success in agp_dealloc_mem.
2600 	 */
2601 	mutex_enter(&st->asoft_instmutex);
2602 	if (agp_del_allkeys(st)) {
2603 		AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
2604 		AGPDB_PRINT2((CE_WARN,
2605 		    "you might free agp memory exported to your applications"));
2606 
2607 		mutex_exit(&st->asoft_instmutex);
2608 		return (DDI_FAILURE);
2609 	}
2610 	mutex_exit(&st->asoft_instmutex);
2611 	if (st->asoft_table) {
2612 		kmem_free(st->asoft_table,
2613 		    AGP_MAXKEYS * (sizeof (keytable_ent_t)));
2614 		st->asoft_table = 0;
2615 	}
2616 
2617 	ddi_remove_minor_node(dip, AGPGART_DEVNODE);
2618 	agp_fini_kstats(st);
2619 	ldi_ident_release(st->asoft_li);
2620 	mutex_destroy(&st->asoft_instmutex);
2621 	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
2622 
2623 	return (DDI_SUCCESS);
2624 }
2625 
2626 /*ARGSUSED*/
2627 static int
2628 agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
2629     void **resultp)
2630 {
2631 	agpgart_softstate_t *st;
2632 	int instance, rval = DDI_FAILURE;
2633 	dev_t dev;
2634 
2635 	switch (cmd) {
2636 	case DDI_INFO_DEVT2DEVINFO:
2637 		dev = (dev_t)arg;
2638 		instance = AGP_DEV2INST(dev);
2639 		st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2640 		if (st != NULL) {
2641 			mutex_enter(&st->asoft_instmutex);
2642 			*resultp = st->asoft_dip;
2643 			mutex_exit(&st->asoft_instmutex);
2644 			rval = DDI_SUCCESS;
2645 		} else
2646 			*resultp = NULL;
2647 
2648 		break;
2649 	case DDI_INFO_DEVT2INSTANCE:
2650 		dev = (dev_t)arg;
2651 		instance = AGP_DEV2INST(dev);
2652 		*resultp = (void *)(uintptr_t)instance;
2653 		rval = DDI_SUCCESS;
2654 
2655 		break;
2656 	default:
2657 		break;
2658 	}
2659 
2660 	return (rval);
2661 }
2662 
2663 /*
2664  * agpgart_open()
2665  *
2666  * Description:
2667  * 	This function is the driver open entry point. If it is the
2668  * 	first time the agpgart driver is opened, the driver will
2669  * 	open other agp related layered drivers and set up the agpgart
2670  * 	table properly.
2671  *
2672  * Arguments:
2673  * 	dev			device number pointer
2674  * 	openflags		open flags
2675  *	otyp			OTYP_BLK, OTYP_CHR
2676  * 	credp			user's credential's struct pointer
2677  *
2678  * Returns:
2679  * 	ENXIO			operation error
2680  * 	EAGAIN			resoure temporarily unvailable
2681  * 	0			success
2682  */
2683 /*ARGSUSED*/
2684 static int
2685 agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
2686 {
2687 	int instance = AGP_DEV2INST(*dev);
2688 	agpgart_softstate_t *softstate;
2689 	int rc = 0;
2690 
2691 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2692 	if (softstate == NULL) {
2693 		AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
2694 		return (ENXIO);
2695 	}
2696 	mutex_enter(&softstate->asoft_instmutex);
2697 
2698 	if (softstate->asoft_opened) {
2699 		softstate->asoft_opened++;
2700 		mutex_exit(&softstate->asoft_instmutex);
2701 		return (0);
2702 	}
2703 
2704 	/*
2705 	 * The driver is opened first time, so we initialize layered
2706 	 * driver interface and softstate member here.
2707 	 */
2708 	softstate->asoft_pgused = 0;
2709 	if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
2710 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
2711 		mutex_exit(&softstate->asoft_instmutex);
2712 		return (EAGAIN);
2713 	}
2714 
2715 	/* Call into layered driver */
2716 	if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
2717 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
2718 		lyr_end(&softstate->asoft_devreg);
2719 		mutex_exit(&softstate->asoft_instmutex);
2720 		return (EIO);
2721 	}
2722 
2723 	/*
2724 	 * BIOS already set up gtt table for ARC_IGD830
2725 	 */
2726 	if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2727 		softstate->asoft_opened++;
2728 
2729 		softstate->asoft_pgtotal =
2730 		    get_max_pages(softstate->asoft_info.agpki_apersize);
2731 
2732 		if (lyr_config_devices(&softstate->asoft_devreg)) {
2733 			AGPDB_PRINT2((CE_WARN,
2734 			    "agpgart_open: lyr_config_devices error"));
2735 			lyr_end(&softstate->asoft_devreg);
2736 			mutex_exit(&softstate->asoft_instmutex);
2737 
2738 			return (EIO);
2739 		}
2740 		mutex_exit(&softstate->asoft_instmutex);
2741 		return (0);
2742 	}
2743 
2744 	rc = alloc_gart_table(softstate);
2745 
2746 	/*
2747 	 * Allocate physically contiguous pages for AGP arc or
2748 	 * i810 arc. If failed, divide aper_size by 2 to
2749 	 * reduce gart table size until 4 megabytes. This
2750 	 * is just a workaround for systems with very few
2751 	 * physically contiguous memory.
2752 	 */
2753 	if (rc) {
2754 		while ((softstate->asoft_info.agpki_apersize >= 4) &&
2755 		    (alloc_gart_table(softstate))) {
2756 			softstate->asoft_info.agpki_apersize >>= 1;
2757 		}
2758 		if (softstate->asoft_info.agpki_apersize >= 4)
2759 			rc = 0;
2760 	}
2761 
2762 	if (rc != 0) {
2763 		AGPDB_PRINT2((CE_WARN,
2764 		    "agpgart_open: alloc gart table failed"));
2765 		lyr_end(&softstate->asoft_devreg);
2766 		mutex_exit(&softstate->asoft_instmutex);
2767 		return (EAGAIN);
2768 	}
2769 
2770 	softstate->asoft_pgtotal =
2771 	    get_max_pages(softstate->asoft_info.agpki_apersize);
2772 	/*
2773 	 * BIOS doesn't initialize GTT for i810,
2774 	 * So i810 GTT must be created by driver.
2775 	 *
2776 	 * Set up gart table and enable it.
2777 	 */
2778 	if (lyr_set_gart_addr(softstate->gart_pbase,
2779 	    &softstate->asoft_devreg)) {
2780 		AGPDB_PRINT2((CE_WARN,
2781 		    "agpgart_open: set gart table addr failed"));
2782 		free_gart_table(softstate);
2783 		lyr_end(&softstate->asoft_devreg);
2784 		mutex_exit(&softstate->asoft_instmutex);
2785 		return (EIO);
2786 	}
2787 	if (lyr_config_devices(&softstate->asoft_devreg)) {
2788 		AGPDB_PRINT2((CE_WARN,
2789 		    "agpgart_open: lyr_config_devices failed"));
2790 		free_gart_table(softstate);
2791 		lyr_end(&softstate->asoft_devreg);
2792 		mutex_exit(&softstate->asoft_instmutex);
2793 		return (EIO);
2794 	}
2795 
2796 	softstate->asoft_opened++;
2797 	mutex_exit(&softstate->asoft_instmutex);
2798 
2799 	return (0);
2800 }
2801 
2802 /*
2803  * agpgart_close()
2804  *
2805  * Description:
2806  * 	agpgart_close will release resources allocated in the first open
2807  * 	and close other open layered drivers. Also it frees the memory
2808  *	allocated by ioctls.
2809  *
2810  * Arguments:
2811  * 	dev			device number
2812  * 	flag			file status flag
2813  *	otyp			OTYP_BLK, OTYP_CHR
2814  * 	credp			user's credential's struct pointer
2815  *
2816  * Returns:
2817  * 	ENXIO			not an error, to support "deferred attach"
2818  * 	0			success
2819  */
2820 /*ARGSUSED*/
2821 static int
2822 agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
2823 {
2824 	int instance = AGP_DEV2INST(dev);
2825 	agpgart_softstate_t *softstate;
2826 
2827 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2828 	if (softstate == NULL) {
2829 		AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
2830 		return (ENXIO);
2831 	}
2832 
2833 	mutex_enter(&softstate->asoft_instmutex);
2834 	ASSERT(softstate->asoft_opened);
2835 
2836 
2837 	/*
2838 	 * If the last process close this device is not the controlling
2839 	 * process, also release the control over agpgart driver here if the
2840 	 * the controlling process fails to release the control before it
2841 	 * close the driver.
2842 	 */
2843 	if (softstate->asoft_acquired == 1) {
2844 		AGPDB_PRINT2((CE_WARN,
2845 		    "agpgart_close: auto release control over driver"));
2846 		release_control(softstate);
2847 	}
2848 
2849 	if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
2850 		AGPDB_PRINT2((CE_WARN,
2851 		    "agpgart_close: lyr_unconfig_device error"));
2852 		mutex_exit(&softstate->asoft_instmutex);
2853 		return (EIO);
2854 	}
2855 	softstate->asoft_agpen = 0;
2856 
2857 	if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2858 		free_gart_table(softstate);
2859 	}
2860 
2861 	lyr_end(&softstate->asoft_devreg);
2862 
2863 	/*
2864 	 * This statement must be positioned before agp_del_allkeys
2865 	 * agp_dealloc_mem indirectly called by agp_del_allkeys
2866 	 * will test this variable.
2867 	 */
2868 	softstate->asoft_opened = 0;
2869 
2870 	/*
2871 	 * Free the memory allocated by user applications which
2872 	 * was never deallocated.
2873 	 */
2874 	(void) agp_del_allkeys(softstate);
2875 
2876 	mutex_exit(&softstate->asoft_instmutex);
2877 
2878 	return (0);
2879 }
2880 
2881 static int
2882 ioctl_agpgart_info(agpgart_softstate_t  *softstate, void  *arg, int flags)
2883 {
2884 	agp_info_t infostruct;
2885 #ifdef _MULTI_DATAMODEL
2886 	agp_info32_t infostruct32;
2887 #endif
2888 
2889 	bzero(&infostruct, sizeof (agp_info_t));
2890 
2891 #ifdef _MULTI_DATAMODEL
2892 	bzero(&infostruct32, sizeof (agp_info32_t));
2893 	if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
2894 		if (copyinfo(softstate, &infostruct))
2895 			return (EINVAL);
2896 
2897 		agpinfo_default_to_32(infostruct, infostruct32);
2898 		if (ddi_copyout(&infostruct32, arg,
2899 		    sizeof (agp_info32_t), flags) != 0)
2900 			return (EFAULT);
2901 
2902 		return (0);
2903 	}
2904 #endif /* _MULTI_DATAMODEL */
2905 	if (copyinfo(softstate, &infostruct))
2906 		return (EINVAL);
2907 
2908 	if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
2909 		return (EFAULT);
2910 	}
2911 
2912 	return (0);
2913 }
2914 
2915 static int
2916 ioctl_agpgart_acquire(agpgart_softstate_t  *st)
2917 {
2918 	if (st->asoft_acquired) {
2919 		AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
2920 		return (EBUSY);
2921 	}
2922 	acquire_control(st);
2923 	return (0);
2924 }
2925 
2926 static int
2927 ioctl_agpgart_release(agpgart_softstate_t  *st)
2928 {
2929 	if (is_controlling_proc(st) < 0) {
2930 		AGPDB_PRINT2((CE_WARN,
2931 		    "ioctl_agpgart_release: not a controlling process"));
2932 		return (EPERM);
2933 	}
2934 	release_control(st);
2935 	return (0);
2936 }
2937 
2938 static int
2939 ioctl_agpgart_setup(agpgart_softstate_t  *st, void  *arg, int flags)
2940 {
2941 	agp_setup_t data;
2942 	int rc = 0;
2943 
2944 	if (is_controlling_proc(st) < 0) {
2945 		AGPDB_PRINT2((CE_WARN,
2946 		    "ioctl_agpgart_setup: not a controlling process"));
2947 		return (EPERM);
2948 	}
2949 
2950 	if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
2951 		AGPDB_PRINT2((CE_WARN,
2952 		    "ioctl_agpgart_setup: no true agp bridge"));
2953 		return (EINVAL);
2954 	}
2955 
2956 	if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
2957 		return (EFAULT);
2958 
2959 	if (rc = agp_setup(st, data.agps_mode))
2960 		return (rc);
2961 	/* Store agp mode status for kstat */
2962 	st->asoft_agpen = 1;
2963 	return (0);
2964 }
2965 
/*
 * ioctl_agpgart_alloc()
 *
 * Description:
 * 	Handle AGPIOC_ALLOCATE: allocate agp memory for the controlling
 * 	process and copy the resulting key (and, for AGP_PHYSICAL
 * 	memory, its physical base address) back to userland.
 */
static int
ioctl_agpgart_alloc(agpgart_softstate_t  *st, void  *arg, int flags)
{
	agp_allocate_t	alloc_info;
	keytable_ent_t	*entryp;
	size_t		length;
	uint64_t	pg_num;

	/* Only the controlling process may allocate agp memory */
	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: not a controlling process"));
		return (EPERM);
	}

	if (ddi_copyin(arg, &alloc_info,
	    sizeof (agp_allocate_t), flags) != 0) {
		return (EFAULT);
	}
	/* pg_num is the prospective total; reject if beyond capacity */
	pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
	if (pg_num > st->asoft_pgtotal) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: exceeding the memory pages limit"));
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: request %x pages failed",
		    alloc_info.agpa_pgcount));
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: pages used %x total is %x",
		    st->asoft_pgused, st->asoft_pgtotal));

		return (EINVAL);
	}

	length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
	entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
	if (!entryp) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
		    length));
		return (ENOMEM);
	}
	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
	alloc_info.agpa_key = entryp->kte_key;
	/* AGP_PHYSICAL memory is contiguous, so one base address suffices */
	if (alloc_info.agpa_type == AGP_PHYSICAL) {
		alloc_info.agpa_physical =
		    (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
	}
	/* Update the memory pages used */
	st->asoft_pgused += alloc_info.agpa_pgcount;

	/*
	 * NOTE(review): if this copyout fails the allocation stays in the
	 * key table and asoft_pgused stays bumped; the caller merely loses
	 * the key. Presumably reclaimed by agp_del_allkeys on close —
	 * confirm.
	 */
	if (ddi_copyout(&alloc_info, arg,
	    sizeof (agp_allocate_t), flags) != 0) {

		return (EFAULT);
	}

	return (0);
}
3023 
3024 static int
3025 ioctl_agpgart_dealloc(agpgart_softstate_t  *st, intptr_t arg)
3026 {
3027 	int key;
3028 	keytable_ent_t  *keyent;
3029 
3030 	if (is_controlling_proc(st) < 0) {
3031 		AGPDB_PRINT2((CE_WARN,
3032 		    "ioctl_agpgart_dealloc: not a controlling process"));
3033 		return (EPERM);
3034 	}
3035 	key = (int)arg;
3036 	if ((key >= AGP_MAXKEYS) || key < 0) {
3037 		return (EINVAL);
3038 	}
3039 	keyent = &st->asoft_table[key];
3040 	if (!keyent->kte_memhdl) {
3041 		return (EINVAL);
3042 	}
3043 
3044 	if (agp_dealloc_mem(st, keyent))
3045 		return (EINVAL);
3046 
3047 	/* Update the memory pages used */
3048 	st->asoft_pgused -= keyent->kte_pages;
3049 	bzero(keyent, sizeof (keytable_ent_t));
3050 
3051 	return (0);
3052 }
3053 
/*
 * ioctl_agpgart_bind()
 *
 * Description:
 * 	Handle AGPIOC_BIND: map the agp memory identified by a key into
 * 	the aperture starting at the requested aperture page.
 */
static int
ioctl_agpgart_bind(agpgart_softstate_t  *st, void  *arg, int flags)
{
	agp_bind_t 	bind_info;
	keytable_ent_t	*keyent;
	int		key;
	uint32_t	pg_offset;
	int		retval = 0;

	/* Only the controlling process may bind agp memory */
	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_bind: not a controlling process"));
		return (EPERM);
	}

	if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
		return (EFAULT);
	}

	key = bind_info.agpb_key;
	if ((key >= AGP_MAXKEYS) || key < 0) {
		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
		return (EINVAL);
	}

	/*
	 * On i830-class hardware the start of the aperture is preallocated
	 * by the BIOS; refuse bindings that fall inside that region.
	 */
	if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
		if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
		    st->asoft_info.agpki_presize) {
			AGPDB_PRINT2((CE_WARN,
			    "ioctl_agpgart_bind: bind to prealloc area "
			    "pgstart = %dKB < presize = %ldKB",
			    AGP_PAGES2KB(bind_info.agpb_pgstart),
			    st->asoft_info.agpki_presize));
			return (EINVAL);
		}
	}

	pg_offset = bind_info.agpb_pgstart;
	keyent = &st->asoft_table[key];
	/* The key must refer to an allocated entry ... */
	if (!keyent->kte_memhdl) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_bind: Key = 0x%x can't get keyenty",
		    key));
		return (EINVAL);
	}

	/* ... which is not already bound */
	if (keyent->kte_bound != 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_bind: Key = 0x%x already bound",
		    key));
		return (EINVAL);
	}
	retval = agp_bind_key(st, keyent, pg_offset);

	/* Record the binding only after the gart update succeeded */
	if (retval == 0) {
		keyent->kte_pgoff = pg_offset;
		keyent->kte_bound = 1;
	}

	return (retval);
}
3115 
3116 static int
3117 ioctl_agpgart_unbind(agpgart_softstate_t  *st, void  *arg, int flags)
3118 {
3119 	int key, retval = 0;
3120 	agp_unbind_t unbindinfo;
3121 	keytable_ent_t *keyent;
3122 
3123 	if (is_controlling_proc(st) < 0) {
3124 		AGPDB_PRINT2((CE_WARN,
3125 		    "ioctl_agpgart_bind: not a controlling process"));
3126 		return (EPERM);
3127 	}
3128 
3129 	if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
3130 		return (EFAULT);
3131 	}
3132 	key = unbindinfo.agpu_key;
3133 	if ((key >= AGP_MAXKEYS) || key < 0) {
3134 		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
3135 		return (EINVAL);
3136 	}
3137 	keyent = &st->asoft_table[key];
3138 	if (!keyent->kte_bound) {
3139 		return (EINVAL);
3140 	}
3141 
3142 	if ((retval = agp_unbind_key(st, keyent)) != 0)
3143 		return (retval);
3144 
3145 	return (0);
3146 }
3147 
/*
 * agpgart_ioctl()
 *
 * Description:
 * 	ioctl(9E) entry point: dispatch the AGPIOC_* commands to their
 * 	handlers. All commands on an instance are serialized by the
 * 	per-instance mutex.
 */
/*ARGSUSED*/
static int
agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
    cred_t *credp, int *rvalp)
{
	int instance;
	int retval = 0;
	void *arg = (void*)intarg;

	agpgart_softstate_t *softstate;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
		return (ENXIO);
	}

	/*
	 * AGPIOC_INFO is allowed for any caller; every other command
	 * additionally requires gart access privilege.
	 */
	if ((cmd != AGPIOC_INFO) && secpolicy_gart_access(credp)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: permission denied"));
		return (EPERM);
	}

	mutex_enter(&softstate->asoft_instmutex);

	switch (cmd) {
	case AGPIOC_INFO:
		retval = ioctl_agpgart_info(softstate, arg, flags);
		break;
	case AGPIOC_ACQUIRE:
		retval = ioctl_agpgart_acquire(softstate);
		break;
	case AGPIOC_RELEASE:
		retval = ioctl_agpgart_release(softstate);
		break;
	case AGPIOC_SETUP:
		retval = ioctl_agpgart_setup(softstate, arg, flags);
		break;
	case AGPIOC_ALLOCATE:
		retval = ioctl_agpgart_alloc(softstate, arg, flags);
		break;
	case AGPIOC_DEALLOCATE:
		/* DEALLOCATE carries its key by value, not by pointer */
		retval = ioctl_agpgart_dealloc(softstate, intarg);
		break;
	case AGPIOC_BIND:
		retval = ioctl_agpgart_bind(softstate, arg, flags);
		break;
	case AGPIOC_UNBIND:
		retval = ioctl_agpgart_unbind(softstate, arg, flags);
		break;
	default:
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument"));
		retval = ENXIO;
		break;
	}

	mutex_exit(&softstate->asoft_instmutex);
	return (retval);
}
3207 
3208 static int
3209 agpgart_segmap(dev_t dev, off_t off, struct as *asp,
3210     caddr_t *addrp, off_t len, unsigned int prot,
3211     unsigned int maxprot, unsigned int flags, cred_t *credp)
3212 {
3213 
3214 	struct agpgart_softstate *softstate;
3215 	int instance;
3216 	int rc = 0;
3217 
3218 	instance = AGP_DEV2INST(dev);
3219 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3220 	if (softstate == NULL) {
3221 		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
3222 		return (ENXIO);
3223 	}
3224 	if (!AGP_ALIGNED(len))
3225 		return (EINVAL);
3226 
3227 	mutex_enter(&softstate->asoft_instmutex);
3228 
3229 	/*
3230 	 * Process must have gart map privilege or gart access privilege
3231 	 * to map agp memory.
3232 	 */
3233 	if (secpolicy_gart_map(credp)) {
3234 		mutex_exit(&softstate->asoft_instmutex);
3235 		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: permission denied"));
3236 		return (EPERM);
3237 	}
3238 
3239 	rc = devmap_setup(dev, (offset_t)off, asp, addrp,
3240 	    (size_t)len, prot, maxprot, flags, credp);
3241 
3242 	mutex_exit(&softstate->asoft_instmutex);
3243 	return (rc);
3244 }
3245 
/*
 * agpgart_devmap()
 *
 * Description:
 * 	devmap(9E) entry point: translate an aperture byte offset into
 * 	the backing agp memory for mmap. Only memory previously bound
 * 	with AGPIOC_BIND can be mapped.
 */
/*ARGSUSED*/
static int
agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
    size_t *mappedlen, uint_t model)
{
	struct agpgart_softstate *softstate;
	int instance, status;
	struct keytable_ent *mementry;
	offset_t local_offset;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
		return (ENXIO);
	}


	/*
	 * NOTE(review): '>' accepts an offset exactly at the aperture end;
	 * the bound-keyent lookup below is relied on to reject offsets with
	 * no memory behind them — confirm.
	 */
	if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
		return (EINVAL);
	}

	/*
	 * Can not find any memory now, so fail.
	 */

	mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));

	if (mementry == NULL) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: can not find the proper keyent"));
		return (EINVAL);
	}

	/* Byte offset of the mapping within the bound memory region */
	local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);

	/* Clamp the mapping length to the end of the bound region */
	if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
		len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
	}

	switch (mementry->kte_type) {
	case AGP_NORMAL:
		/* pmem-backed memory, exported uncached/write-combining */
		status = devmap_pmem_setup(cookie, softstate->asoft_dip,
		    &agp_devmap_cb,
		    PMEMP(mementry->kte_memhdl)->pmem_cookie, local_offset,
		    len, PROT_ALL, (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
		    &mem_dev_acc_attr);
		break;
	default:
		/* Only AGP_NORMAL memory can be mmapped through this path */
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: not a valid memory type"));
		return (EINVAL);
	}


	if (status == 0) {
		*mappedlen = len;
	} else {
		*mappedlen = 0;
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: devmap interface failed"));
		return (EINVAL);
	}

	return (0);
}
3313 
/*
 * Character device entry points for agpgart. Unused entry points are
 * stubbed with nodev/nochpoll; mmap is superseded by the devmap/segmap
 * interfaces (hence D_DEVMAP in cb_flag).
 */
static struct cb_ops	agpgart_cb_ops = {
	agpgart_open,		/* cb_open */
	agpgart_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	agpgart_ioctl,		/* cb_ioctl */
	agpgart_devmap,		/* cb_devmap */
	nodev,			/* cb_mmap: superseded by devmap interface */
	agpgart_segmap,		/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_str: not a STREAMS driver */
	D_DEVMAP | D_MP,	/* cb_flag: devmap-capable, MT-safe */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev,			/* cb_awrite */
};
3334 
/*
 * Autoconfiguration (dev_ops) entry points for the agpgart driver.
 */
static struct dev_ops agpgart_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	agpgart_getinfo,	/* devo_getinfo */
	nulldev,		/* devo_identify: obsolete */
	nulldev,		/* devo_probe */
	agpgart_attach,		/* devo_attach */
	agpgart_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&agpgart_cb_ops,	/* devo_cb_ops */
	(struct bus_ops *)0,	/* devo_bus_ops: not a nexus driver */
	NULL,			/* devo_power */
};
3348 
/*
 * Module linkage: identifies this module as a device driver. The %I%
 * token in the linkinfo string is expanded by SCCS at build time.
 */
static	struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops: this is a driver module */
	"AGP driver v%I%",	/* drv_linkinfo: human-readable description */
	&agpgart_ops,		/* drv_dev_ops */
};
3354 
static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev: required modlinkage revision */
	{&modldrv, NULL, NULL, NULL}	/* ml_linkage: one driver, NULL term */
};
3359 
/*
 * Note: agpgart_glob_soft_handle is defined once near the top of this file;
 * the redundant duplicate tentative definition that was here was removed.
 */
3361 
3362 int
3363 _init(void)
3364 {
3365 	int ret = DDI_SUCCESS;
3366 
3367 	ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
3368 	    sizeof (agpgart_softstate_t),
3369 	    AGPGART_MAX_INSTANCES);
3370 
3371 	if (ret != 0) {
3372 		AGPDB_PRINT2((CE_WARN,
3373 		    "_init: soft state init error code=0x%x", ret));
3374 		return (ret);
3375 	}
3376 
3377 	if ((ret = mod_install(&modlinkage)) != 0) {
3378 		AGPDB_PRINT2((CE_WARN,
3379 		    "_init: mod install error code=0x%x", ret));
3380 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3381 		return (ret);
3382 	}
3383 
3384 	return (DDI_SUCCESS);
3385 }
3386 
/*
 * _info()
 *
 * 	Loadable module entry point: report module information by
 * 	delegating to mod_info(9F) with our modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3392 
3393 int
3394 _fini(void)
3395 {
3396 	int ret;
3397 
3398 	if ((ret = mod_remove(&modlinkage)) == 0) {
3399 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3400 	}
3401 
3402 	return (ret);
3403 }
3404