xref: /titanic_41/usr/src/uts/intel/io/agpgart/agpgart.c (revision da2e3ebdc1edfbc5028edf1354e7dd2fa69a7968)
1 /*
2  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 /*
6  * Portions Philip Brown phil@bolthole.com Dec 2001
7  */
8 
9 #pragma ident	"%Z%%M%	%I%	%E% SMI"
10 
11 /*
12  * agpgart driver
13  *
14  * This driver is primary targeted at providing memory support for INTEL
15  * AGP device, INTEL memory less video card, and AMD64 cpu GART devices.
 * So there are five main architectures, ARC_IGD810, ARC_IGD830, ARC_INTELAGP,
 * ARC_AMD64AGP and ARC_AMD64NOAGP, supported by the agpgart driver. However,
 * the memory interfaces are the same for all of these architectures. The
 * difference is how the hardware GART table is managed for each of them.
20  *
 * For large memory allocations, this driver uses direct mapping to the
 * userland application interface to save kernel virtual memory.
23  */
24 
25 #include <sys/types.h>
26 #include <sys/pci.h>
27 #include <sys/systm.h>
28 #include <sys/conf.h>
29 #include <sys/file.h>
30 #include <sys/kstat.h>
31 #include <sys/stat.h>
32 #include <sys/modctl.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/sunldi.h>
36 #include <sys/policy.h>
37 #include <sys/ddidevmap.h>
38 #include <vm/seg_dev.h>
39 #include <sys/pmem.h>
40 #include <sys/agpgart.h>
41 #include <sys/agp/agpdefs.h>
42 #include <sys/agp/agpgart_impl.h>
43 #include <sys/agp/agpamd64gart_io.h>
44 #include <sys/agp/agpmaster_io.h>
45 #include <sys/agp/agptarget_io.h>
46 
/* Dynamic debug support */
int agp_debug_var = 0;
/*
 * Debug tracing macros: level 1 exactly (PRINT1) or level >= 1 (PRINT2).
 * "fmt" is a parenthesized cmn_err argument list, e.g. (CE_NOTE, "...").
 * Wrapped in do/while (0) so each macro expands to a single statement
 * and is safe inside an unbraced if/else (avoids the dangling-else bug
 * the bare-if form of these macros had).
 */
#define	AGPDB_PRINT1(fmt)	\
	do { if (agp_debug_var == 1) cmn_err fmt; } while (0)
#define	AGPDB_PRINT2(fmt)	\
	do { if (agp_debug_var >= 1) cmn_err fmt; } while (0)
51 
/* Driver global softstate handle (ddi_soft_state anchor for all instances) */
static void *agpgart_glob_soft_handle;

/* Highest driver instance number probed when searching for AGP devices */
#define	MAX_INSTNUM			16

/*
 * The instance number lives in the minor number above the low 4 bits;
 * the low 4 bits presumably select a node type -- not visible here.
 */
#define	AGP_DEV2INST(devt)	(getminor((devt)) >> 4)
#define	AGP_INST2MINOR(instance)	((instance) << 4)
#define	IS_INTEL_830(type)	((type) == ARC_IGD830)
#define	IS_TRUE_AGP(type)	(((type) == ARC_INTELAGP) || \
	((type) == ARC_AMD64AGP))
62 
/*
 * Copy a native agp_info_t into its ILP32 counterpart field by field,
 * for 32-bit application ioctl support.  NOTE(review): the expansion is
 * a bare braced block, so a call followed by ";" inside an unbraced
 * if/else is a dangling-else hazard -- callers must brace the body.
 */
#define	agpinfo_default_to_32(v, v32)	\
	{				\
		(v32).agpi32_version = (v).agpi_version;	\
		(v32).agpi32_devid = (v).agpi_devid;	\
		(v32).agpi32_mode = (v).agpi_mode;	\
		(v32).agpi32_aperbase = (v).agpi_aperbase;	\
		(v32).agpi32_apersize = (v).agpi_apersize;	\
		(v32).agpi32_pgtotal = (v).agpi_pgtotal;	\
		(v32).agpi32_pgsystem = (v).agpi_pgsystem;	\
		(v32).agpi32_pgused = (v).agpi_pgused;	\
	}
74 
/*
 * DMA attributes for AGP memory allocations: addresses constrained below
 * 4G and aligned to AGP_PAGE_SIZE.
 */
static ddi_dma_attr_t agpgart_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
89 
/*
 * AMD64 supports gart table above 4G. See alloc_gart_table.
 * NOTE(review): dma_attr_addr_hi below still limits allocations to 4G;
 * presumably alloc_gart_table (not in this view) relaxes it for amd64 --
 * confirm there.
 */
static ddi_dma_attr_t garttable_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
107 
/*
 * AGPGART table need a physical contiguous memory. To assure that
 * each access to gart table is strongly ordered and uncachable,
 * we use DDI_STRICTORDER_ACC.
 */
static ddi_device_acc_attr_t gart_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native byte order */
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};
118 
/*
 * AGP memory is usually used as texture memory or for a framebuffer, so we
 * can set the memory attribute to write combining. Video drivers will
 * determine the frame buffer attributes, for example the memory is write
 * combining or non-cachable. However, the interface between Xorg and agpgart
 * driver to support attribute selection doesn't exist yet. So we set agp
 * memory to non-cachable by default now. This attribute might be overridden
 * by MTRR in X86.
 */
static ddi_device_acc_attr_t mem_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native byte order */
	DDI_STRICTORDER_ACC	/* Can be DDI_MERGING_OK_ACC */
};
133 
134 static keytable_ent_t *
135 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
136 static void
137 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
138 
139 
140 static void
141 agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
142     offset_t off, size_t len, devmap_cookie_t new_handle1,
143     void **new_devprivate1, devmap_cookie_t new_handle2,
144     void **new_devprivate2)
145 {
146 
147 	struct keytable_ent *mementry;
148 	agpgart_softstate_t *softstate;
149 	agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;
150 
151 	ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
152 	ASSERT(devprivate);
153 	ASSERT(handle);
154 
155 	ctxp = (agpgart_ctx_t *)devprivate;
156 	softstate = ctxp->actx_sc;
157 	ASSERT(softstate);
158 
159 	if (new_handle1 != NULL) {
160 		newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
161 		newctxp1->actx_sc = softstate;
162 		newctxp1->actx_off = ctxp->actx_off;
163 		*new_devprivate1 = newctxp1;
164 	}
165 
166 	if (new_handle2 != NULL) {
167 		newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
168 		newctxp2->actx_sc = softstate;
169 		newctxp2->actx_off = off + len;
170 		*new_devprivate2 = newctxp2;
171 	}
172 
173 	mutex_enter(&softstate->asoft_instmutex);
174 	if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
175 		mementry =
176 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
177 		ASSERT(mementry);
178 		mementry->kte_refcnt--;
179 	} else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
180 		mementry =
181 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
182 		ASSERT(mementry);
183 		mementry->kte_refcnt++;
184 	}
185 	ASSERT(mementry->kte_refcnt >= 0);
186 	mutex_exit(&softstate->asoft_instmutex);
187 	kmem_free(ctxp, sizeof (struct agpgart_ctx));
188 }
189 
190 /*ARGSUSED*/
191 static int
192 agp_devmap_map(devmap_cookie_t handle, dev_t dev,
193     uint_t flags, offset_t offset, size_t len, void **new_devprivate)
194 {
195 	agpgart_softstate_t *softstate;
196 	int instance;
197 	struct keytable_ent *mementry;
198 	agpgart_ctx_t *newctxp;
199 
200 	ASSERT(handle);
201 	instance = AGP_DEV2INST(dev);
202 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
203 	if (softstate == NULL) {
204 		AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
205 		return (ENXIO);
206 	}
207 
208 	ASSERT(softstate);
209 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
210 	ASSERT(len);
211 	ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));
212 
213 	mementry =
214 	    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
215 	ASSERT(mementry);
216 	mementry->kte_refcnt++;
217 	ASSERT(mementry->kte_refcnt >= 0);
218 	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
219 	newctxp->actx_off = offset;
220 	newctxp->actx_sc = softstate;
221 	*new_devprivate = newctxp;
222 
223 	return (0);
224 }
225 
226 /*ARGSUSED*/
227 static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
228     devmap_cookie_t new_handle, void **new_devprivate)
229 {
230 	struct keytable_ent *mementry;
231 	agpgart_ctx_t *newctxp, *ctxp;
232 	agpgart_softstate_t *softstate;
233 
234 	ASSERT(devprivate);
235 	ASSERT(handle && new_handle);
236 
237 	ctxp = (agpgart_ctx_t *)devprivate;
238 	ASSERT(AGP_ALIGNED(ctxp->actx_off));
239 
240 	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
241 	newctxp->actx_off = ctxp->actx_off;
242 	newctxp->actx_sc = ctxp->actx_sc;
243 	softstate = (agpgart_softstate_t *)newctxp->actx_sc;
244 
245 	mutex_enter(&softstate->asoft_instmutex);
246 	mementry = agp_find_bound_keyent(softstate,
247 	    AGP_BYTES2PAGES(newctxp->actx_off));
248 	mementry->kte_refcnt++;
249 	ASSERT(mementry->kte_refcnt >= 0);
250 	mutex_exit(&softstate->asoft_instmutex);
251 	*new_devprivate = newctxp;
252 
253 	return (0);
254 }
255 
/*
 * devmap callback vector wiring the three context-tracking callbacks
 * above; presumably passed to devmap_setup()/ddi_devmap_segmap by the
 * driver's devmap entry point (registration site not in this view).
 */
struct devmap_callback_ctl agp_devmap_cb = {
	DEVMAP_OPS_REV,		/* rev */
	agp_devmap_map,		/* map */
	NULL,			/* access */
	agp_devmap_dup,		/* dup */
	agp_devmap_unmap,	/* unmap */
};
263 
264 /*
265  * agp_master_regis_byname()
266  *
267  * Description:
268  * 	Open the AGP master device node by device path name and
269  * 	register the device handle for later operations.
270  * 	We check all possible driver instance from 0
271  * 	to MAX_INSTNUM because the master device could be
272  * 	at any instance number. Only one AGP master is supported.
273  *
274  * Arguments:
275  * 	master_hdlp		AGP master device LDI handle pointer
276  *	agpgart_l		AGPGART driver LDI identifier
277  *
278  * Returns:
279  * 	-1			failed
280  * 	0			success
281  */
282 static int
283 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
284 {
285 	int	i;
286 	char	buf[MAXPATHLEN];
287 
288 	ASSERT(master_hdlp);
289 	ASSERT(agpgart_li);
290 
291 	/*
292 	 * Search all possible instance numbers for the agp master device.
293 	 * Only one master device is supported now, so the search ends
294 	 * when one master device is found.
295 	 */
296 	for (i = 0; i < MAX_INSTNUM; i++) {
297 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
298 		if ((ldi_open_by_name(buf, 0, kcred,
299 		    master_hdlp, agpgart_li)))
300 			continue;
301 		AGPDB_PRINT1((CE_NOTE,
302 		    "master device found: instance number=%d", i));
303 		break;
304 
305 	}
306 
307 	/* AGP master device not found */
308 	if (i == MAX_INSTNUM)
309 		return (-1);
310 
311 	return (0);
312 }
313 
314 /*
315  * agp_target_regis_byname()
316  *
317  * Description:
318  * 	This function opens agp bridge device node by
319  * 	device path name and registers the device handle
320  * 	for later operations.
321  * 	We check driver instance from 0 to MAX_INSTNUM
322  * 	because the master device could be at any instance
323  * 	number. Only one agp target is supported.
324  *
325  *
326  * Arguments:
327  *	target_hdlp		AGP target device LDI handle pointer
328  *	agpgart_l		AGPGART driver LDI identifier
329  *
330  * Returns:
331  * 	-1			failed
332  * 	0			success
333  */
334 static int
335 agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
336 {
337 	int	i;
338 	char	buf[MAXPATHLEN];
339 
340 	ASSERT(target_hdlp);
341 	ASSERT(agpgart_li);
342 
343 	for (i = 0; i < MAX_INSTNUM; i++) {
344 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
345 		if ((ldi_open_by_name(buf, 0, kcred,
346 		    target_hdlp, agpgart_li)))
347 			continue;
348 
349 		AGPDB_PRINT1((CE_NOTE,
350 		    "bridge device found: instance number=%d", i));
351 		break;
352 
353 	}
354 
355 	/* AGP bridge device not found */
356 	if (i == MAX_INSTNUM) {
357 		AGPDB_PRINT2((CE_WARN, "bridge device not found"));
358 		return (-1);
359 	}
360 
361 	return (0);
362 }
363 
364 /*
365  * amd64_gart_regis_byname()
366  *
367  * Description:
368  * 	Open all amd64 gart device nodes by deice path name and
369  * 	register the device handles for later operations. Each cpu
370  * 	has its own amd64 gart device.
371  *
372  * Arguments:
373  * 	cpu_garts		cpu garts device list header
374  *	agpgart_l		AGPGART driver LDI identifier
375  *
376  * Returns:
 * 	0			success
 * 	ENODEV			no amd64 gart device was found
 * 	other errno		error opening a gart device
379  */
380 static int
381 amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
382 {
383 	amd64_gart_dev_list_t	*gart_list;
384 	int			i;
385 	char			buf[MAXPATHLEN];
386 	ldi_handle_t		gart_hdl;
387 	int			ret;
388 
389 	ASSERT(cpu_garts);
390 	ASSERT(agpgart_li);
391 
392 	/*
393 	 * Search all possible instance numbers for the gart devices.
394 	 * There can be multiple on-cpu gart devices for Opteron server.
395 	 */
396 	for (i = 0; i < MAX_INSTNUM; i++) {
397 		(void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
398 		ret = ldi_open_by_name(buf, 0, kcred,
399 		    &gart_hdl, agpgart_li);
400 
401 		if (ret == ENODEV)
402 			continue;
403 		else if (ret != 0) { /* There was an error opening the device */
404 			amd64_gart_unregister(cpu_garts);
405 			return (ret);
406 		}
407 
408 		AGPDB_PRINT1((CE_NOTE,
409 		    "amd64 gart device found: instance number=%d", i));
410 
411 		gart_list = (amd64_gart_dev_list_t *)
412 		    kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);
413 
414 		/* Add new item to the head of the gart device list */
415 		gart_list->gart_devhdl = gart_hdl;
416 		gart_list->next = cpu_garts->gart_dev_list_head;
417 		cpu_garts->gart_dev_list_head = gart_list;
418 		cpu_garts->gart_device_num++;
419 	}
420 
421 	if (cpu_garts->gart_device_num == 0)
422 		return (ENODEV);
423 	return (0);
424 }
425 
426 /*
427  * Unregister agp master device handle
428  */
429 static void
430 agp_master_unregister(ldi_handle_t *master_hdlp)
431 {
432 	ASSERT(master_hdlp);
433 
434 	if (master_hdlp) {
435 		(void) ldi_close(*master_hdlp, 0, kcred);
436 		*master_hdlp = NULL;
437 	}
438 }
439 
440 /*
441  * Unregister agp bridge device handle
442  */
443 static void
444 agp_target_unregister(ldi_handle_t *target_hdlp)
445 {
446 	if (target_hdlp) {
447 		(void) ldi_close(*target_hdlp, 0, kcred);
448 		*target_hdlp = NULL;
449 	}
450 }
451 
452 /*
453  * Unregister all amd64 gart device handles
454  */
455 static void
456 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
457 {
458 	amd64_gart_dev_list_t	*gart_list;
459 	amd64_gart_dev_list_t	*next;
460 
461 	ASSERT(cpu_garts);
462 
463 	for (gart_list = cpu_garts->gart_dev_list_head;
464 	    gart_list; gart_list = next) {
465 
466 		ASSERT(gart_list->gart_devhdl);
467 		(void) ldi_close(gart_list->gart_devhdl, 0, kcred);
468 		next = gart_list->next;
469 		/* Free allocated memory */
470 		kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
471 	}
472 	cpu_garts->gart_dev_list_head = NULL;
473 	cpu_garts->gart_device_num = 0;
474 }
475 
476 /*
477  * lyr_detect_master_type()
478  *
479  * Description:
480  * 	This function gets agp master type by querying agp master device.
481  *
482  * Arguments:
483  * 	master_hdlp		agp master device ldi handle pointer
484  *
485  * Returns:
486  * 	-1			unsupported device
487  * 	DEVICE_IS_I810		i810 series
 * 	DEVICE_IS_I830		i830 series
489  * 	DEVICE_IS_AGP		true agp master
490  */
491 static int
492 lyr_detect_master_type(ldi_handle_t *master_hdlp)
493 {
494 	int vtype;
495 	int err;
496 
497 	ASSERT(master_hdlp);
498 
499 	/* ldi_ioctl(agpmaster) */
500 	err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
501 	    (intptr_t)&vtype, FKIOCTL, kcred, 0);
502 	if (err) /* Unsupported graphics device */
503 		return (-1);
504 	return (vtype);
505 }
506 
507 /*
508  * devtect_target_type()
509  *
510  * Description:
511  * 	This function gets the host bridge chipset type by querying the agp
512  *	target device.
513  *
514  * Arguments:
515  * 	target_hdlp		agp target device LDI handle pointer
516  *
517  * Returns:
518  * 	CHIP_IS_INTEL		Intel agp chipsets
519  * 	CHIP_IS_AMD		AMD agp chipset
520  * 	-1			unsupported chipset
521  */
522 static int
523 lyr_detect_target_type(ldi_handle_t *target_hdlp)
524 {
525 	int btype;
526 	int err;
527 
528 	ASSERT(target_hdlp);
529 
530 	err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
531 	    FKIOCTL, kcred, 0);
532 	if (err)	/* Unsupported bridge device */
533 		return (-1);
534 	return (btype);
535 }
536 
537 /*
538  * lyr_init()
539  *
540  * Description:
541  * 	This function detects the  graphics system architecture and
542  * 	registers all relative device handles in a global structure
543  * 	"agp_regdev". Then it stores the system arc type in driver
544  * 	soft state.
545  *
546  * Arguments:
547  *	agp_regdev		AGP devices registration struct pointer
548  *	agpgart_l		AGPGART driver LDI identifier
549  *
550  * Returns:
551  * 	0	System arc supported and agp devices registration successed.
552  * 	-1	System arc not supported or device registration failed.
553  */
int
lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
{
	ldi_handle_t *master_hdlp;
	ldi_handle_t *target_hdlp;
	amd64_garts_dev_t *garts_dev;
	int card_type, chip_type;
	int ret;

	ASSERT(agp_regdev);

	bzero(agp_regdev, sizeof (agp_registered_dev_t));
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	/*
	 * Register agp devices, assuming all instances attached, and
	 * detect which agp architecture this server belongs to. This
	 * must be done before the agpgart driver starts to use layered
	 * driver interfaces.
	 */
	master_hdlp = &agp_regdev->agprd_masterhdl;
	target_hdlp = &agp_regdev->agprd_targethdl;
	garts_dev = &agp_regdev->agprd_cpugarts;

	/* Check whether the system is amd64 arc */
	if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
		/* No amd64 gart devices: must be an Intel IGD/AGP system */
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: this is not an amd64 system"));
		if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register master device unsuccessful"));
			goto err1;
		}
		if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register target device unsuccessful"));
			goto err2;
		}
		card_type = lyr_detect_master_type(master_hdlp);
		/*
		 * Detect system arc by master device. If it is a intel
		 * integrated device, finish the detection successfully.
		 */
		switch (card_type) {
		case DEVICE_IS_I810:	/* I810 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 810 arch"));
			agp_regdev->agprd_arctype = ARC_IGD810;
			return (0);
		case DEVICE_IS_I830:	/* I830 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 830 arch"));
			agp_regdev->agprd_arctype = ARC_IGD830;
			return (0);
		case DEVICE_IS_AGP:	/* AGP graphics, keep detecting */
			break;
		default:		/* Non IGD/AGP graphics */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: non-supported master device"));
			goto err3;
		}

		chip_type = lyr_detect_target_type(target_hdlp);

		/* Continue to detect AGP arc by target device */
		switch (chip_type) {
		case CHIP_IS_INTEL:	/* Intel chipset */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: Intel AGP arch detected"));
			agp_regdev->agprd_arctype = ARC_INTELAGP;
			return (0);
		case CHIP_IS_AMD:	/* AMD chipset without cpu garts */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: no cpu gart, but have AMD64 chipsets"));
			goto err3;
		default:		/* Non supported chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: detection can not continue"));
			goto err3;
		}

	}

	if (ret)
		return (-1); /* Errors in open amd64 cpu gart devices */

	/*
	 * AMD64 cpu gart device exists, continue detection.  A missing
	 * master or target is not fatal here: the system can still be
	 * used for IOMMU support (ARC_AMD64NOAGP).
	 */

	if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: register master device unsuccessful"));

		agp_regdev->agprd_arctype = ARC_AMD64NOAGP;
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP master, but supports IOMMU in amd64"));
		return (0); /* Finished successfully */
	}

	if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: register target device unsuccessful"));

		agp_regdev->agprd_arctype = ARC_AMD64NOAGP;

		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP bridge, but supports IOMMU in amd64"));

		/* Master was registered above but is unusable without target */
		agp_master_unregister(&agp_regdev->agprd_masterhdl);
		return (0); /* Finished successfully */

	}


	AGPDB_PRINT1((CE_NOTE,
	    "lyr_init: the system is AMD64 AGP architecture"));

	agp_regdev->agprd_arctype = ARC_AMD64AGP;

	return (0); /* Finished successfully */

	/* Error unwind: release whatever was registered before failing */
err3:
	agp_target_unregister(&agp_regdev->agprd_targethdl);
err2:
	agp_master_unregister(&agp_regdev->agprd_masterhdl);
err1:
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	return (-1);
}
684 
685 void
686 lyr_end(agp_registered_dev_t *agp_regdev)
687 {
688 	ASSERT(agp_regdev);
689 
690 	switch (agp_regdev->agprd_arctype) {
691 	case ARC_IGD810:
692 	case ARC_IGD830:
693 	case ARC_INTELAGP:
694 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
695 		agp_target_unregister(&agp_regdev->agprd_targethdl);
696 
697 		return;
698 	case ARC_AMD64AGP:
699 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
700 		agp_target_unregister(&agp_regdev->agprd_targethdl);
701 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
702 
703 		return;
704 	case ARC_AMD64NOAGP:
705 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
706 
707 		return;
708 	default:
709 		ASSERT(0);
710 		return;
711 	}
712 }
713 
/*
 * lyr_get_info()
 *
 * Description:
 * 	Collect device and aperture information from the registered agp
 * 	devices into "info", using the ioctl set appropriate to the
 * 	detected architecture, then sanity-check the aperture values.
 *
 * Returns:
 * 	0	success
 * 	-1	ioctl failure, unknown arc, or badly programmed aperture
 */
int
lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
{
	ldi_handle_t hdl;
	igd_info_t value1;
	i_agp_info_t value2;
	amdgart_info_t value3;
	size_t prealloc_size;
	int err;
	amd64_gart_dev_list_t	*gart_head;

	ASSERT(info);
	ASSERT(agp_regdev);

	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
		/* The master (IGD) driver reports devid and aperture */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = value1.igd_apersize;

		/* The target (bridge) driver reports the preallocated size */
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_presize = prealloc_size;

		break;

	case ARC_IGD830:
		/* Same queries as ARC_IGD810 ... */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);

		/*
		 * ... but the i830 prealloc figure must be adjusted.
		 * Assume all units are kilobytes unless explicitly
		 * stated below:
		 * preallocated GTT memory = preallocated memory - GTT size
		 * 	- scratch page size
		 *
		 * scratch page size = 4
		 * GTT size = aperture size (in MBs)
		 * this algorithm came from Xorg source code
		 */
		prealloc_size = prealloc_size - info->agpki_apersize - 4;
		info->agpki_presize = prealloc_size;
		AGPDB_PRINT2((CE_NOTE,
		    "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
		    prealloc_size, info->agpki_apersize));
		break;
	case ARC_INTELAGP:
	case ARC_AMD64AGP:
		/* AGP devices: query master and target separately */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value2.iagp_devid;
		info->agpki_mver = value2.iagp_ver;
		info->agpki_mstatus = value2.iagp_mode;
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_tdevid = value2.iagp_devid;
		info->agpki_tver = value2.iagp_ver;
		info->agpki_tstatus = value2.iagp_mode;
		info->agpki_aperbase = value2.iagp_aperbase;
		info->agpki_apersize = value2.iagp_apersize;
		break;
	case ARC_AMD64NOAGP:
		/* Meaningful for IOMMU support only; ask the first cpu gart */
		gart_head = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		err = ldi_ioctl(gart_head->gart_devhdl, AMD64_GET_INFO,
		    (intptr_t)&value3, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_aperbase = value3.cgart_aperbase;
		info->agpki_apersize = value3.cgart_apersize;
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: function doesn't work for unknown arc"));
		return (-1);
	}
	/* Reject apertures that firmware left unprogrammed or oversized */
	if ((info->agpki_apersize >= MAXAPERMEGAS) ||
	    (info->agpki_apersize == 0) ||
	    (info->agpki_aperbase == 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: aperture is not programmed correctly!"));
		return (-1);
	}

	return (0);
}
827 
828 /*
829  * lyr_i8xx_add_to_gtt()
830  *
831  * Description:
832  * 	This function sets up the integrated video device gtt table
833  * 	via an ioclt to the AGP master driver.
834  *
835  * Arguments:
836  * 	pg_offset	The start entry to be setup
837  * 	keyent		Keytable entity pointer
838  *	agp_regdev	AGP devices registration struct pointer
839  *
840  * Returns:
841  * 	0		success
842  * 	-1		invalid operations
843  */
844 int
845 lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
846     agp_registered_dev_t *agp_regdev)
847 {
848 	int err = 0;
849 	int rval;
850 	ldi_handle_t hdl;
851 	igd_gtt_seg_t gttseg;
852 	uint32_t *addrp, i;
853 	uint32_t npages;
854 
855 	ASSERT(keyent);
856 	ASSERT(agp_regdev);
857 	gttseg.igs_pgstart =  pg_offset;
858 	npages = keyent->kte_pages;
859 	gttseg.igs_npage = npages;
860 	gttseg.igs_type = keyent->kte_type;
861 	gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
862 	    (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);
863 
864 	addrp = gttseg.igs_phyaddr;
865 	for (i = 0; i < npages; i++, addrp++) {
866 		*addrp =
867 		    (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
868 	}
869 
870 	hdl = agp_regdev->agprd_masterhdl;
871 	if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)&gttseg, FKIOCTL,
872 	    kcred, &rval)) {
873 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
874 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
875 		    gttseg.igs_pgstart));
876 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
877 		    gttseg.igs_npage));
878 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
879 		    gttseg.igs_type));
880 		err = -1;
881 	}
882 	kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
883 	return (err);
884 }
885 
886 /*
887  * lyr_i8xx_remove_from_gtt()
888  *
889  * Description:
890  * 	This function clears the integrated video device gtt table via
891  * 	an ioctl to the agp master device.
892  *
893  * Arguments:
894  * 	pg_offset	The starting entry to be cleared
895  * 	npage		The number of entries to be cleared
896  *	agp_regdev	AGP devices struct pointer
897  *
898  * Returns:
899  * 	0		success
900  * 	-1		invalid operations
901  */
902 int
903 lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
904     agp_registered_dev_t *agp_regdev)
905 {
906 	int			rval;
907 	ldi_handle_t		hdl;
908 	igd_gtt_seg_t		gttseg;
909 
910 	gttseg.igs_pgstart =  pg_offset;
911 	gttseg.igs_npage = npage;
912 
913 	hdl = agp_regdev->agprd_masterhdl;
914 	if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
915 	    kcred, &rval))
916 		return (-1);
917 
918 	return (0);
919 }
920 
921 /*
922  * lyr_set_gart_addr()
923  *
924  * Description:
925  *	This function puts the gart table physical address in the
926  * 	gart base register.
927  *	Please refer to gart and gtt table base register format for
928  *	gart base register format in agpdefs.h.
929  *
930  * Arguments:
931  * 	phy_base	The base physical address of gart table
932  *	agp_regdev	AGP devices registration struct pointer
933  *
934  * Returns:
935  * 	0		success
936  * 	-1		failed
937  *
938  */
939 
int
lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			err = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
	{
		uint32_t base;

		/* The i810 GTT base register is 32-bit; verify it fits */
		ASSERT((phy_base & ~I810_POINTER_MASK) == 0);
		base = (uint32_t)phy_base;

		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
		    (intptr_t)&base, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_INTELAGP:
	{
		uint32_t addr;
		addr = (uint32_t)phy_base;

		/*
		 * NOTE(review): the truncating assignment above precedes
		 * this range ASSERT; harmless, but the IGD810 case asserts
		 * first -- consider matching that order.
		 */
		ASSERT((phy_base & ~GTT_POINTER_MASK) == 0);
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
		    (intptr_t)&addr, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
	{
		uint32_t addr;

		ASSERT((phy_base & ~AMD64_POINTER_MASK) == 0);
		/* Encode the base into AMD64 GART base register format */
		addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
		    & AMD64_GARTBASE_MASK);

		/* Every on-cpu gart is programmed with the same base */
		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list;
		    gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
			    (intptr_t)&addr, FKIOCTL, kcred, 0)) {
				err = -1;
				break;
			}
		}
		break;
	}
	default:
		err = -1;
	}

	if (err)
		return (-1);

	return (0);
}
1002 
1003 int
1004 lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
1005 {
1006 	ldi_handle_t hdl;
1007 	uint32_t command;
1008 
1009 	ASSERT(agp_regdev);
1010 	command = cmd;
1011 	hdl = agp_regdev->agprd_targethdl;
1012 	if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
1013 	    (intptr_t)&command, FKIOCTL, kcred, 0))
1014 		return (-1);
1015 	hdl = agp_regdev->agprd_masterhdl;
1016 	if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
1017 	    (intptr_t)&command, FKIOCTL, kcred, 0))
1018 		return (-1);
1019 
1020 	return (0);
1021 }
1022 
1023 int
1024 lyr_config_devices(agp_registered_dev_t *agp_regdev)
1025 {
1026 	amd64_gart_dev_list_t	*gart_list;
1027 	ldi_handle_t		hdl;
1028 	int			rc = 0;
1029 
1030 	ASSERT(agp_regdev);
1031 	switch (agp_regdev->agprd_arctype) {
1032 	case ARC_IGD830:
1033 	case ARC_IGD810:
1034 		break;
1035 	case ARC_INTELAGP:
1036 	{
1037 		hdl = agp_regdev->agprd_targethdl;
1038 		rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
1039 		    0, FKIOCTL, kcred, 0);
1040 		break;
1041 	}
1042 	case ARC_AMD64NOAGP:
1043 	case ARC_AMD64AGP:
1044 	{
1045 		/*
1046 		 * BIOS always shadow registers such like Aperture Base
1047 		 * register, Aperture Size Register from the AGP bridge
1048 		 * to the AMD64 CPU host bridge. If future BIOSes are broken
1049 		 * in this regard, we may need to shadow these registers
1050 		 * in driver.
1051 		 */
1052 
1053 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1054 		    gart_list;
1055 		    gart_list = gart_list->next) {
1056 			hdl = gart_list->gart_devhdl;
1057 			if (ldi_ioctl(hdl, AMD64_CONFIGURE,
1058 			    0, FKIOCTL, kcred, 0)) {
1059 				rc = -1;
1060 				break;
1061 			}
1062 		}
1063 		break;
1064 	}
1065 	default:
1066 		rc = -1;
1067 	}
1068 
1069 	if (rc)
1070 		return (-1);
1071 
1072 	return (0);
1073 }
1074 
1075 int
1076 lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
1077 {
1078 	amd64_gart_dev_list_t	*gart_list;
1079 	ldi_handle_t		hdl;
1080 	int			rc = 0;
1081 
1082 	ASSERT(agp_regdev);
1083 	switch (agp_regdev->agprd_arctype) {
1084 	case ARC_IGD830:
1085 	case ARC_IGD810:
1086 	{
1087 		hdl = agp_regdev->agprd_masterhdl;
1088 		rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
1089 		break;
1090 	}
1091 	case ARC_INTELAGP:
1092 	{
1093 		hdl = agp_regdev->agprd_targethdl;
1094 		rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
1095 		    0, FKIOCTL, kcred, 0);
1096 		break;
1097 	}
1098 	case ARC_AMD64NOAGP:
1099 	case ARC_AMD64AGP:
1100 	{
1101 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1102 		    gart_list; gart_list = gart_list->next) {
1103 			hdl = gart_list->gart_devhdl;
1104 			if (ldi_ioctl(hdl, AMD64_UNCONFIG,
1105 			    0, FKIOCTL, kcred, 0)) {
1106 				rc = -1;
1107 				break;
1108 			}
1109 		}
1110 		break;
1111 	}
1112 	default:
1113 		rc = -1;
1114 	}
1115 
1116 	if (rc)
1117 		return (-1);
1118 
1119 	return (0);
1120 }
1121 
1122 /*
1123  * lyr_flush_gart_cache()
1124  *
1125  * Description:
1126  * 	This function flushes the GART translation look-aside buffer. All
1127  * 	GART translation caches will be flushed after this operation.
1128  *
1129  * Arguments:
1130  *	agp_regdev	AGP devices struct pointer
1131  */
1132 void
1133 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
1134 {
1135 	amd64_gart_dev_list_t	*gart_list;
1136 	ldi_handle_t		hdl;
1137 
1138 	ASSERT(agp_regdev);
1139 	if ((agp_regdev->agprd_arctype == ARC_AMD64AGP) ||
1140 	    (agp_regdev->agprd_arctype == ARC_AMD64NOAGP)) {
1141 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1142 		    gart_list; gart_list = gart_list->next) {
1143 			hdl = gart_list->gart_devhdl;
1144 			(void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
1145 			    0, FKIOCTL, kcred, 0);
1146 		}
1147 	} else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
1148 		hdl = agp_regdev->agprd_targethdl;
1149 		(void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
1150 		    FKIOCTL, kcred, 0);
1151 	}
1152 }
1153 
1154 /*
1155  * get_max_pages()
1156  *
1157  * Description:
1158  * 	This function compute the total pages allowed for agp aperture
1159  *	based on the ammount of physical pages.
1160  * 	The algorithm is: compare the aperture size with 1/4 of total
1161  *	physical pages, and use the smaller one to for the max available
1162  * 	pages.
1163  *
1164  * Arguments:
1165  * 	aper_size	system agp aperture size (in MB)
1166  *
1167  * Returns:
1168  * 	The max possible number of agp memory pages available to users
1169  */
1170 static uint32_t
1171 get_max_pages(uint32_t aper_size)
1172 {
1173 	uint32_t i, j;
1174 
1175 	ASSERT(aper_size <= MAXAPERMEGAS);
1176 
1177 	i = AGP_MB2PAGES(aper_size);
1178 	j = (physmem >> 2);
1179 
1180 	return ((i < j) ? i : j);
1181 }
1182 
1183 /*
1184  * agp_fill_empty_keyent()
1185  *
1186  * Description:
1187  * 	This function finds a empty key table slot and
1188  * 	fills it with a new entity.
1189  *
1190  * Arguments:
1191  * 	softsate	driver soft state pointer
1192  * 	entryp		new entity data pointer
1193  *
1194  * Returns:
1195  * 	NULL	no key table slot available
1196  * 	entryp	the new entity slot pointer
1197  */
1198 static keytable_ent_t *
1199 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1200 {
1201 	int key;
1202 	keytable_ent_t *newentryp;
1203 
1204 	ASSERT(softstate);
1205 	ASSERT(entryp);
1206 	ASSERT(entryp->kte_memhdl);
1207 	ASSERT(entryp->kte_pfnarray);
1208 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1209 
1210 	for (key = 0; key < AGP_MAXKEYS; key++) {
1211 		newentryp = &softstate->asoft_table[key];
1212 		if (newentryp->kte_memhdl == NULL) {
1213 			break;
1214 		}
1215 	}
1216 
1217 	if (key >= AGP_MAXKEYS) {
1218 		AGPDB_PRINT2((CE_WARN,
1219 		    "agp_fill_empty_keyent: key table exhausted"));
1220 		return (NULL);
1221 	}
1222 
1223 	ASSERT(newentryp->kte_pfnarray == NULL);
1224 	bcopy(entryp, newentryp, sizeof (keytable_ent_t));
1225 	newentryp->kte_key = key;
1226 
1227 	return (newentryp);
1228 }
1229 
1230 /*
1231  * agp_find_bound_keyent()
1232  *
1233  * Description:
1234  * 	This function finds the key table entity by agp aperture page offset.
1235  * 	Every keytable entity will have an agp aperture range after the binding
1236  *	operation.
1237  *
1238  * Arguments:
1239  * 	softsate	driver soft state pointer
1240  * 	pg_offset	agp aperture page offset
1241  *
1242  * Returns:
1243  * 	NULL		no such keytable entity
1244  * 	pointer		key table entity pointer found
1245  */
1246 static keytable_ent_t *
1247 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
1248 {
1249 	int keycount;
1250 	keytable_ent_t *entryp;
1251 
1252 	ASSERT(softstate);
1253 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1254 
1255 	for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
1256 		entryp = &softstate->asoft_table[keycount];
1257 		if (entryp->kte_bound == 0) {
1258 			continue;
1259 		}
1260 
1261 		if (pg_offset < entryp->kte_pgoff)
1262 			continue;
1263 		if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
1264 			continue;
1265 
1266 		ASSERT(entryp->kte_memhdl);
1267 		ASSERT(entryp->kte_pfnarray);
1268 
1269 		return (entryp);
1270 	}
1271 
1272 	return (NULL);
1273 }
1274 
1275 /*
1276  * agp_check_off()
1277  *
1278  * Description:
1279  * 	This function checks whether an AGP aperture range to be bound
1280  *	overlaps with AGP offset already bound.
1281  *
1282  * Arguments:
1283  *	entryp		key table start entry pointer
1284  * 	pg_start	AGP range start page offset
1285  *	pg_num		pages number to be bound
1286  *
1287  * Returns:
1288  *	0		Does not overlap
1289  *	-1		Overlaps
1290  */
1291 
1292 static int
1293 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
1294 {
1295 	int key;
1296 	uint64_t pg_end;
1297 	uint64_t kpg_end;
1298 
1299 	ASSERT(entryp);
1300 
1301 	pg_end = pg_start + pg_num;
1302 	for (key = 0; key < AGP_MAXKEYS; key++) {
1303 		if (!entryp[key].kte_bound)
1304 			continue;
1305 
1306 		kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
1307 		if (!((pg_end <= entryp[key].kte_pgoff) ||
1308 		    (pg_start >= kpg_end)))
1309 			break;
1310 	}
1311 
1312 	if (key == AGP_MAXKEYS)
1313 		return (0);
1314 	else
1315 		return (-1);
1316 }
1317 
1318 static int
1319 is_controlling_proc(agpgart_softstate_t *st)
1320 {
1321 	ASSERT(st);
1322 
1323 	if (!st->asoft_acquired) {
1324 		AGPDB_PRINT2((CE_WARN,
1325 		    "ioctl_agpgart_setup: gart not acquired"));
1326 		return (-1);
1327 	}
1328 	if (st->asoft_curpid != ddi_get_pid()) {
1329 		AGPDB_PRINT2((CE_WARN,
1330 		    "ioctl_agpgart_release: not  controlling process"));
1331 		return (-1);
1332 	}
1333 
1334 	return (0);
1335 }
1336 
1337 static void release_control(agpgart_softstate_t *st)
1338 {
1339 	st->asoft_curpid = 0;
1340 	st->asoft_acquired = 0;
1341 }
1342 
1343 static void acquire_control(agpgart_softstate_t *st)
1344 {
1345 	st->asoft_curpid = ddi_get_pid();
1346 	st->asoft_acquired = 1;
1347 }
1348 
1349 /*
1350  * agp_remove_from_gart()
1351  *
1352  * Description:
1353  * 	This function fills the gart table entries by a given page
1354  * 	frame number array and setup the agp aperture page to physical
1355  * 	memory page translation.
1356  * Arguments:
1357  * 	pg_offset	Starting aperture page to be bound
1358  * 	entries		the number of pages to be bound
1359  * 	acc_hdl		GART table dma memory acc handle
1360  * 	tablep		GART table kernel virtual address
1361  */
1362 static void
1363 agp_remove_from_gart(
1364     uint32_t pg_offset,
1365     uint32_t entries,
1366     ddi_dma_handle_t dma_hdl,
1367     uint32_t *tablep)
1368 {
1369 	uint32_t items = 0;
1370 	uint32_t *entryp;
1371 
1372 	entryp = tablep + pg_offset;
1373 	while (items < entries) {
1374 		*(entryp + items) = 0;
1375 		items++;
1376 	}
1377 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
1378 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
1379 }
1380 
1381 /*
1382  * agp_unbind_key()
1383  *
1384  * Description:
1385  * 	This function unbinds AGP memory from the gart table. It will clear
1386  * 	all the gart entries related to this agp memory.
1387  *
1388  * Arguments:
1389  * 	softstate		driver soft state pointer
1390  * 	entryp			key table entity pointer
1391  *
1392  * Returns:
1393  * 	EINVAL		invalid key table entity pointer
1394  * 	0		success
1395  *
1396  */
1397 static int
1398 agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1399 {
1400 	int retval = 0;
1401 
1402 	ASSERT(entryp);
1403 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
1404 
1405 	if (!entryp->kte_bound) {
1406 		AGPDB_PRINT2((CE_WARN,
1407 		    "agp_unbind_key: key = 0x%x, not bound",
1408 		    entryp->kte_key));
1409 		return (EINVAL);
1410 	}
1411 	if (entryp->kte_refcnt) {
1412 		AGPDB_PRINT2((CE_WARN,
1413 		    "agp_unbind_key: memory is exported to users"));
1414 		return (EINVAL);
1415 	}
1416 
1417 	ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
1418 	    AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
1419 	ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));
1420 
1421 	switch (softstate->asoft_devreg.agprd_arctype) {
1422 	case ARC_IGD810:
1423 	case ARC_IGD830:
1424 		retval = lyr_i8xx_remove_from_gtt(
1425 		    entryp->kte_pgoff, entryp->kte_pages,
1426 		    &softstate->asoft_devreg);
1427 		if (retval) {
1428 			AGPDB_PRINT2((CE_WARN,
1429 			    "agp_unbind_key: Key = 0x%x, clear table error",
1430 			    entryp->kte_key));
1431 			return (EIO);
1432 		}
1433 		break;
1434 	case ARC_INTELAGP:
1435 	case ARC_AMD64NOAGP:
1436 	case ARC_AMD64AGP:
1437 		agp_remove_from_gart(entryp->kte_pgoff,
1438 		    entryp->kte_pages,
1439 		    softstate->gart_dma_handle,
1440 		    (uint32_t *)softstate->gart_vbase);
1441 		/* Flush GTLB table */
1442 		lyr_flush_gart_cache(&softstate->asoft_devreg);
1443 
1444 		break;
1445 	}
1446 
1447 	entryp->kte_bound = 0;
1448 
1449 	return (0);
1450 }
1451 
1452 /*
1453  * agp_dealloc_kmem()
1454  *
1455  * Description:
1456  * 	This function deallocates dma memory resources for userland
1457  * 	applications.
1458  *
1459  * Arguments:
1460  * 	entryp		keytable entity pointer
1461  */
1462 static void
1463 agp_dealloc_kmem(keytable_ent_t *entryp)
1464 {
1465 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1466 	entryp->kte_pfnarray = NULL;
1467 
1468 	(void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
1469 	KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
1470 	ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
1471 	KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
1472 	KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
1473 	KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
1474 
1475 	ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
1476 	KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
1477 
1478 	kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
1479 	entryp->kte_memhdl = NULL;
1480 }
1481 
1482 /*
1483  * agp_dealloc_pmem()
1484  *
1485  * Description:
1486  * 	This function deallocates memory resource for direct mapping to
1487  * 	userland applications.
1488  *
1489  * Arguments:
1490  * 	entryp		key table entity pointer
1491  *
1492  */
1493 static void
1494 agp_dealloc_pmem(keytable_ent_t *entryp)
1495 {
1496 	devmap_pmem_free(PMEMP(entryp->kte_memhdl)->pmem_cookie);
1497 	PMEMP(entryp->kte_memhdl)->pmem_cookie = NULL;
1498 	kmem_free(entryp->kte_memhdl, sizeof (agp_pmem_handle_t));
1499 	entryp->kte_memhdl = NULL;
1500 
1501 	/* free the page frame number array */
1502 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1503 	entryp->kte_pfnarray = NULL;
1504 }
1505 
1506 /*
1507  * agp_dealloc_mem()
1508  *
1509  * Description:
1510  * 	This function deallocates physical memory resources allocated for
1511  *	userland applications.
1512  *
1513  * Arguments:
1514  * 	st		driver soft state pointer
1515  * 	entryp		key table entity pointer
1516  *
1517  * Returns:
1518  * 	-1		not a valid memory type or the memory is mapped by
1519  * 			user area applications
1520  * 	0		success
1521  */
1522 static int
1523 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t	*entryp)
1524 {
1525 
1526 	ASSERT(entryp);
1527 	ASSERT(st);
1528 	ASSERT(entryp->kte_memhdl);
1529 	ASSERT(mutex_owned(&st->asoft_instmutex));
1530 
1531 	/* auto unbind here */
1532 	if (entryp->kte_bound && !entryp->kte_refcnt) {
1533 		AGPDB_PRINT2((CE_WARN,
1534 		    "agp_dealloc_mem: key=0x%x, auto unbind",
1535 		    entryp->kte_key));
1536 
1537 		/*
1538 		 * agp_dealloc_mem may be called indirectly by agp_detach.
1539 		 * In the agp_detach function, agpgart_close is already
1540 		 * called which will free the gart table. agp_unbind_key
1541 		 * will panic if no valid gart table exists. So test if
1542 		 * gart table exsits here.
1543 		 */
1544 		if (st->asoft_opened)
1545 			(void) agp_unbind_key(st, entryp);
1546 	}
1547 	if (entryp->kte_refcnt) {
1548 		AGPDB_PRINT2((CE_WARN,
1549 		    "agp_dealloc_pmem: memory is exported to users"));
1550 		return (-1);
1551 	}
1552 
1553 	switch (entryp->kte_type) {
1554 	case AGP_NORMAL:
1555 		agp_dealloc_pmem(entryp);
1556 		break;
1557 	case AGP_PHYSICAL:
1558 		agp_dealloc_kmem(entryp);
1559 		break;
1560 	default:
1561 		return (-1);
1562 	}
1563 
1564 	return (0);
1565 }
1566 
1567 /*
1568  * agp_del_allkeys()
1569  *
1570  * Description:
1571  * 	This function calls agp_dealloc_mem to release all the agp memory
1572  *	resource allocated.
1573  *
1574  * Arguments:
1575  * 	softsate	driver soft state pointer
1576  * Returns:
1577  * 	-1		can not free all agp memory
1578  * 	0		success
1579  *
1580  */
1581 static int
1582 agp_del_allkeys(agpgart_softstate_t *softstate)
1583 {
1584 	int key;
1585 	int ret = 0;
1586 
1587 	ASSERT(softstate);
1588 	for (key = 0; key < AGP_MAXKEYS; key++) {
1589 		if (softstate->asoft_table[key].kte_memhdl != NULL) {
1590 			/*
1591 			 * Check if we can free agp memory now.
1592 			 * If agp memory is exported to user
1593 			 * applications, agp_dealloc_mem will fail.
1594 			 */
1595 			if (agp_dealloc_mem(softstate,
1596 			    &softstate->asoft_table[key]))
1597 				ret = -1;
1598 		}
1599 	}
1600 
1601 	return (ret);
1602 }
1603 
1604 /*
1605  * pfn2gartentry()
1606  *
1607  * Description:
1608  *	This function converts a physical address to GART entry.
1609  *	For AMD64, hardware only support addresses below 40bits,
1610  *	about 1024G physical address, so the largest pfn
1611  *	number is below 28 bits. Please refer to GART and GTT entry
1612  *	format table in agpdefs.h for entry format. Intel IGD only
1613  * 	only supports GTT entry below 1G. Intel AGP only supports
1614  * 	GART entry below 4G.
1615  *
1616  * Arguments:
1617  * 	arc_type		system agp arc type
1618  * 	pfn			page frame number
1619  * 	itemv			the entry item to be returned
1620  * Returns:
1621  * 	-1			not a invalid page frame
1622  * 	0			conversion success
1623  */
1624 static int
1625 pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
1626 {
1627 	uint64_t paddr;
1628 
1629 	paddr = pfn<<AGP_PAGE_SHIFT;
1630 
1631 	switch (arc_type) {
1632 	case ARC_INTELAGP:
1633 	{
1634 		/* Only support 32-bit hardware address */
1635 		if ((paddr & ~AGP_INTEL_POINTER_MASK) != 0) {
1636 			AGPDB_PRINT2((CE_WARN,
1637 			    "INTEL AGP Hardware only support 32 bits"));
1638 			return (-1);
1639 		}
1640 		*itemv =  (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;
1641 
1642 		break;
1643 	}
1644 	case ARC_AMD64NOAGP:
1645 	case ARC_AMD64AGP:
1646 	{
1647 		uint32_t value1, value2;
1648 		/* Physaddr should not exceed 40-bit */
1649 		if ((paddr & ~AMD64_POINTER_MASK) != 0) {
1650 			AGPDB_PRINT2((CE_WARN,
1651 			    "AMD64 GART hardware only supoort 40 bits"));
1652 			return (-1);
1653 		}
1654 		value1 = (uint32_t)pfn >> 20;
1655 		value1 <<= 4;
1656 		value2 = (uint32_t)pfn << 12;
1657 
1658 		*itemv = value1 | value2 | AMD64_ENTRY_VALID;
1659 		break;
1660 	}
1661 	case ARC_IGD810:
1662 		if ((paddr & ~I810_POINTER_MASK) != 0) {
1663 			AGPDB_PRINT2((CE_WARN,
1664 			    "Intel i810 only support 30 bits"));
1665 			return (-1);
1666 		}
1667 		break;
1668 
1669 	case ARC_IGD830:
1670 		if ((paddr & ~GTT_POINTER_MASK) != 0) {
1671 			AGPDB_PRINT2((CE_WARN,
1672 			    "Intel IGD only support 32 bits"));
1673 			return (-1);
1674 		}
1675 		break;
1676 	default:
1677 		AGPDB_PRINT2((CE_WARN,
1678 		    "pfn2gartentry: arc type = %d, not support", arc_type));
1679 		return (-1);
1680 	}
1681 	return (0);
1682 }
1683 
1684 /*
1685  * Check allocated physical pages validity, only called in DEBUG
1686  * mode.
1687  */
1688 static int
1689 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
1690 {
1691 	int count;
1692 	uint32_t ret;
1693 
1694 	for (count = 0; count < items; count++) {
1695 		if (pfn2gartentry(arc_type, pfnarray[count], &ret))
1696 			break;
1697 	}
1698 	if (count < items)
1699 		return (-1);
1700 	else
1701 		return (0);
1702 }
1703 
1704 /*
1705  * kmem_getpfns()
1706  *
1707  * Description:
1708  * 	This function gets page frame numbers from dma handle.
1709  *
1710  * Arguments:
1711  * 	dma_handle		dma hanle allocated by ddi_dma_alloc_handle
1712  * 	dma_cookip		dma cookie pointer
1713  * 	cookies_num		cookies number
1714  * 	pfnarray		array to store page frames
1715  *
1716  * Returns:
1717  *	0		success
1718  */
1719 static int
1720 kmem_getpfns(
1721     ddi_dma_handle_t dma_handle,
1722     ddi_dma_cookie_t *dma_cookiep,
1723     int cookies_num,
1724     pfn_t *pfnarray)
1725 {
1726 	int	num_cookies;
1727 	int	index = 0;
1728 
1729 	num_cookies = cookies_num;
1730 
1731 	while (num_cookies > 0) {
1732 		uint64_t ck_startaddr, ck_length, ck_end;
1733 		ck_startaddr = dma_cookiep->dmac_address;
1734 		ck_length = dma_cookiep->dmac_size;
1735 
1736 		ck_end = ck_startaddr + ck_length;
1737 		while (ck_startaddr < ck_end) {
1738 			pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
1739 			ck_startaddr += AGP_PAGE_SIZE;
1740 			index++;
1741 		}
1742 
1743 		num_cookies--;
1744 		if (num_cookies > 0) {
1745 			ddi_dma_nextcookie(dma_handle, dma_cookiep);
1746 		}
1747 	}
1748 
1749 	return (0);
1750 }
1751 
1752 static int
1753 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
1754 {
1755 	switch (softstate->asoft_devreg.agprd_arctype) {
1756 	case ARC_IGD810:
1757 	case ARC_IGD830:
1758 		info->agpi_version.agpv_major = 0;
1759 		info->agpi_version.agpv_minor = 0;
1760 		info->agpi_devid = softstate->asoft_info.agpki_mdevid;
1761 		info->agpi_mode = 0;
1762 		break;
1763 	case ARC_INTELAGP:
1764 	case ARC_AMD64AGP:
1765 		info->agpi_version = softstate->asoft_info.agpki_tver;
1766 		info->agpi_devid = softstate->asoft_info.agpki_tdevid;
1767 		info->agpi_mode = softstate->asoft_info.agpki_tstatus;
1768 		break;
1769 	case ARC_AMD64NOAGP:
1770 		break;
1771 	default:
1772 		AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC"));
1773 		return (-1);
1774 	}
1775 	/*
1776 	 * 64bit->32bit conversion possible
1777 	 */
1778 	info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
1779 	info->agpi_apersize = softstate->asoft_info.agpki_apersize;
1780 	info->agpi_pgtotal = softstate->asoft_pgtotal;
1781 	info->agpi_pgsystem = info->agpi_pgtotal;
1782 	info->agpi_pgused = softstate->asoft_pgused;
1783 
1784 	return (0);
1785 }
1786 
1787 static uint32_t
1788 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1789 {
1790 	uint32_t cmd;
1791 	int rq, sba, over4g, fw, rate;
1792 
1793 	/*
1794 	 * tstatus: target device status
1795 	 * mstatus: master device status
1796 	 * mode: the agp mode to be sent
1797 	 */
1798 
1799 	/*
1800 	 * RQ - Request Queue size
1801 	 * set RQ to the min of mode and tstatus
1802 	 * if mode set a RQ larger than hardware can support,
1803 	 * use the max RQ which hardware can support.
1804 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1805 	 * Corelogic will enqueue agp transaction
1806 	 */
1807 	rq = mode & AGPSTAT_RQ_MASK;
1808 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1809 		rq = tstatus & AGPSTAT_RQ_MASK;
1810 
1811 	/*
1812 	 * SBA - Sideband Addressing
1813 	 *
1814 	 * Sideband Addressing provides an additional bus to pass requests
1815 	 * (address and command) to the target from the master.
1816 	 *
1817 	 * set SBA if all three support it
1818 	 */
1819 	sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
1820 	    & (mode & AGPSTAT_SBA);
1821 
1822 	/* set OVER4G  if all three support it */
1823 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1824 	    & (mode & AGPSTAT_OVER4G);
1825 
1826 	/*
1827 	 * FW - fast write
1828 	 *
1829 	 * acceleration of memory write transactions from the corelogic to the
1830 	 * A.G.P. master device acting like a PCI target.
1831 	 *
1832 	 * set FW if all three support it
1833 	 */
1834 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1835 	    & (mode & AGPSTAT_FW);
1836 
1837 	/*
1838 	 * figure out the max rate
1839 	 * AGP v2 support: 4X, 2X, 1X speed
1840 	 * status bit		meaning
1841 	 * ---------------------------------------------
1842 	 * 7:3			others
1843 	 * 3			0 stand for V2 support
1844 	 * 0:2			001:1X, 010:2X, 100:4X
1845 	 * ----------------------------------------------
1846 	 */
1847 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1848 	    & (mode & AGPSTAT_RATE_MASK);
1849 	if (rate & AGP2_RATE_4X)
1850 		rate = AGP2_RATE_4X;
1851 	else if (rate & AGP2_RATE_2X)
1852 		rate = AGP2_RATE_2X;
1853 	else
1854 		rate = AGP2_RATE_1X;
1855 
1856 	cmd = rq | sba | over4g | fw | rate;
1857 	/* enable agp mode */
1858 	cmd |= AGPCMD_AGPEN;
1859 
1860 	return (cmd);
1861 }
1862 
1863 static uint32_t
1864 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1865 {
1866 	uint32_t cmd = 0;
1867 	uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
1868 
1869 	/*
1870 	 * tstatus: target device status
1871 	 * mstatus: master device status
1872 	 * mode: the agp mode to be set
1873 	 */
1874 
1875 	/*
1876 	 * RQ - Request Queue size
1877 	 * Set RQ to the min of mode and tstatus
1878 	 * If mode set a RQ larger than hardware can support,
1879 	 * use the max RQ which hardware can support.
1880 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1881 	 * Corelogic will enqueue agp transaction;
1882 	 */
1883 	rq = mode & AGPSTAT_RQ_MASK;
1884 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1885 		rq = tstatus & AGPSTAT_RQ_MASK;
1886 
1887 	/*
1888 	 * ARQSZ - Asynchronous Request Queue size
1889 	 * Set the value equal to tstatus.
1890 	 * Don't allow the mode register to override values
1891 	 */
1892 	arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
1893 
1894 	/*
1895 	 * CAL - Calibration cycle
1896 	 * Set to the min of tstatus and mstatus
1897 	 * Don't allow override by mode register
1898 	 */
1899 	cal = tstatus & AGPSTAT_CAL_MASK;
1900 	if ((mstatus & AGPSTAT_CAL_MASK) < cal)
1901 		cal = mstatus & AGPSTAT_CAL_MASK;
1902 
1903 	/*
1904 	 * SBA - Sideband Addressing
1905 	 *
1906 	 * Sideband Addressing provides an additional bus to pass requests
1907 	 * (address and command) to the target from the master.
1908 	 *
1909 	 * SBA in agp v3.0 must be set
1910 	 */
1911 	sba = AGPCMD_SBAEN;
1912 
1913 	/* GART64B is not set since no hardware supports it now */
1914 
1915 	/* Set OVER4G if all three support it */
1916 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1917 	    & (mode & AGPSTAT_OVER4G);
1918 
1919 	/*
1920 	 * FW - fast write
1921 	 *
1922 	 * Acceleration of memory write transactions from the corelogic to the
1923 	 * A.G.P. master device acting like a PCI target.
1924 	 *
1925 	 * Always set FW in AGP 3.0
1926 	 */
1927 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1928 	    & (mode & AGPSTAT_FW);
1929 
1930 	/*
1931 	 * Figure out the max rate
1932 	 *
1933 	 * AGP v3 support: 8X, 4X speed
1934 	 *
1935 	 * status bit		meaning
1936 	 * ---------------------------------------------
1937 	 * 7:3			others
1938 	 * 3			1 stand for V3 support
1939 	 * 0:2			001:4X, 010:8X, 011:4X,8X
1940 	 * ----------------------------------------------
1941 	 */
1942 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1943 	    & (mode & AGPSTAT_RATE_MASK);
1944 	if (rate & AGP3_RATE_8X)
1945 		rate = AGP3_RATE_8X;
1946 	else
1947 		rate = AGP3_RATE_4X;
1948 
1949 	cmd = rq | arqsz | cal | sba | over4g | fw | rate;
1950 	/* Enable AGP mode */
1951 	cmd |= AGPCMD_AGPEN;
1952 
1953 	return (cmd);
1954 }
1955 
/*
 * agp_setup()
 *
 * Description:
 * 	Negotiates an AGP command word and programs it into both the
 * 	target and master devices, choosing AGP 3.0 or AGP 2.0 setup
 * 	based on the mode the hardware reports it is working in.
 *
 * Arguments:
 * 	softstate	driver soft state pointer
 * 	mode		the agp mode requested by the caller
 *
 * Returns:
 * 	0		success
 * 	EIO		master/target mode mismatch, or the command
 * 			write to the devices failed
 */
static int
agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
{
	uint32_t tstatus, mstatus;
	uint32_t agp_mode;

	tstatus = softstate->asoft_info.agpki_tstatus;
	mstatus = softstate->asoft_info.agpki_mstatus;

	/*
	 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0
	 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
	 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in
	 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register,
	 * we can get which mode it is working at. The working mode of
	 * AGP master and AGP target must be consistent. That is, both
	 * of them must work on AGP 3.0 mode or AGP 2.0 mode.
	 */
	if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
	    (tstatus & AGPSTAT_MODE3)) {
		/* Master device should be 3.0 mode, too */
		if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
		    ((mstatus & AGPSTAT_MODE3) == 0))
			return (EIO);

		agp_mode = agp_v3_setup(tstatus, mstatus, mode);
		/* Write to the AGPCMD register of target and master devices */
		if (lyr_set_agp_cmd(agp_mode,
		    &softstate->asoft_devreg))
			return (EIO);

		softstate->asoft_mode = agp_mode;

		return (0);
	}

	/*
	 * If agp target device doesn't work in AGP 3.0 mode,
	 * it must work in AGP 2.0 mode. And make sure
	 * master device work in AGP 2.0 mode too
	 */
	if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
	    (mstatus & AGPSTAT_MODE3))
		return (EIO);

	agp_mode = agp_v2_setup(tstatus, mstatus, mode);
	if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
		return (EIO);
	softstate->asoft_mode = agp_mode;

	return (0);
}
2008 
2009 /*
2010  * agp_alloc_pmem()
2011  *
2012  * Description:
2013  * 	This function allocates physical memory for direct mapping to userland
2014  * 	applications.
2015  *
2016  * Arguments:
2017  * 	softsate	driver soft state pointer
2018  * 	length		memory size
2019  * 	type		AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
2020  *			memory type for intel i810 IGD
2021  *
2022  * Returns:
2023  * 	entryp		new key table entity pointer
2024  * 	NULL		no key table slot available
2025  */
2026 static keytable_ent_t *
2027 agp_alloc_pmem(agpgart_softstate_t *softstate, size_t length, int type)
2028 {
2029 	keytable_ent_t	keyentry;
2030 	keytable_ent_t	*entryp;
2031 
2032 	ASSERT(AGP_ALIGNED(length));
2033 	bzero(&keyentry, sizeof (keytable_ent_t));
2034 
2035 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2036 	keyentry.kte_type = type;
2037 
2038 	keyentry.kte_memhdl =
2039 	    (agp_pmem_handle_t *)kmem_zalloc(sizeof (agp_pmem_handle_t),
2040 	    KM_SLEEP);
2041 
2042 	if (devmap_pmem_alloc(length,
2043 	    PMEM_SLEEP,
2044 	    &PMEMP(keyentry.kte_memhdl)->pmem_cookie) != DDI_SUCCESS)
2045 		goto err1;
2046 
2047 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2048 	    keyentry.kte_pages, KM_SLEEP);
2049 
2050 	if (devmap_pmem_getpfns(
2051 	    PMEMP(keyentry.kte_memhdl)->pmem_cookie,
2052 	    0, keyentry.kte_pages, keyentry.kte_pfnarray) != DDI_SUCCESS) {
2053 		AGPDB_PRINT2((CE_WARN,
2054 		    "agp_alloc_pmem: devmap_map_getpfns failed"));
2055 		goto err2;
2056 	}
2057 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2058 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2059 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2060 
2061 	if (!entryp) {
2062 		AGPDB_PRINT2((CE_WARN,
2063 		    "agp_alloc_pmem: agp_fill_empty_keyent error"));
2064 		goto err2;
2065 	}
2066 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2067 
2068 	return (entryp);
2069 
2070 err2:
2071 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2072 	keyentry.kte_pfnarray = NULL;
2073 	devmap_pmem_free(PMEMP(keyentry.kte_memhdl)->pmem_cookie);
2074 	PMEMP(keyentry.kte_memhdl)->pmem_cookie = NULL;
2075 err1:
2076 	kmem_free(keyentry.kte_memhdl, sizeof (agp_pmem_handle_t));
2077 	keyentry.kte_memhdl = NULL;
2078 
2079 	return (NULL);
2080 
2081 }
2082 
2083 /*
2084  * agp_alloc_kmem()
2085  *
2086  * Description:
2087  * 	This function allocates physical memory for userland applications
2088  * 	by ddi interfaces. This function can only be called to allocate
2089  *	small phsyical contiguous pages, usually tens of kilobytes.
2090  *
2091  * Arguments:
2092  * 	softsate	driver soft state pointer
2093  * 	length		memory size
2094  *
2095  * Returns:
2096  * 	entryp		new keytable entity pointer
2097  * 	NULL		no keytable slot available or no physical
2098  *			memory available
2099  */
2100 static keytable_ent_t *
2101 agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length)
2102 {
2103 	keytable_ent_t	keyentry;
2104 	keytable_ent_t	*entryp;
2105 	int		ret;
2106 
2107 	ASSERT(AGP_ALIGNED(length));
2108 
2109 	bzero(&keyentry, sizeof (keytable_ent_t));
2110 
2111 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2112 	keyentry.kte_type = AGP_PHYSICAL;
2113 
2114 	/*
2115 	 * Set dma_attr_sgllen to assure contiguous physical pages
2116 	 */
2117 	agpgart_dma_attr.dma_attr_sgllen = 1;
2118 
2119 	/* 4k size pages */
2120 	keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);
2121 
2122 	if (ddi_dma_alloc_handle(softstate->asoft_dip,
2123 	    &agpgart_dma_attr,
2124 	    DDI_DMA_SLEEP, NULL,
2125 	    &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
2126 		AGPDB_PRINT2((CE_WARN,
2127 		    "agp_alloc_kmem: ddi_dma_allco_hanlde error"));
2128 		goto err4;
2129 	}
2130 
2131 	if ((ret = ddi_dma_mem_alloc(
2132 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2133 	    length,
2134 	    &gart_dev_acc_attr,
2135 	    DDI_DMA_CONSISTENT,
2136 	    DDI_DMA_SLEEP, NULL,
2137 	    &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2138 	    &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
2139 	    &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
2140 		AGPDB_PRINT2((CE_WARN,
2141 		    "agp_alloc_kmem: ddi_dma_mem_alloc error"));
2142 
2143 		goto err3;
2144 	}
2145 
2146 	ret = ddi_dma_addr_bind_handle(
2147 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2148 	    NULL,
2149 	    KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2150 	    length,
2151 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2152 	    DDI_DMA_SLEEP,
2153 	    NULL,
2154 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2155 	    &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);
2156 
2157 	/*
2158 	 * Even dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more
2159 	 * than one cookie, we check this in the if statement.
2160 	 */
2161 
2162 	if ((ret != DDI_DMA_MAPPED) ||
2163 	    (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1)) {
2164 		AGPDB_PRINT2((CE_WARN,
2165 		    "agp_alloc_kmem: can not alloc physical memory properly"));
2166 		goto err2;
2167 	}
2168 
2169 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2170 	    keyentry.kte_pages, KM_SLEEP);
2171 
2172 	if (kmem_getpfns(
2173 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2174 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2175 	    KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
2176 	    keyentry.kte_pfnarray)) {
2177 		AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
2178 		goto err1;
2179 	}
2180 
2181 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2182 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2183 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2184 	if (!entryp) {
2185 		AGPDB_PRINT2((CE_WARN,
2186 		    "agp_alloc_kmem: agp_fill_empty_keyent error"));
2187 
2188 		goto err1;
2189 	}
2190 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2191 
2192 	return (entryp);
2193 
2194 err1:
2195 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2196 	keyentry.kte_pfnarray = NULL;
2197 	(void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
2198 	KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
2199 err2:
2200 	ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
2201 	KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
2202 	KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
2203 	KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
2204 err3:
2205 	ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
2206 	KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
2207 err4:
2208 	kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
2209 	keyentry.kte_memhdl = NULL;
2210 	return (NULL);
2211 
2212 }
2213 
2214 /*
2215  * agp_alloc_mem()
2216  *
2217  * Description:
2218  * 	This function allocate physical memory for userland applications,
2219  * 	in order to save kernel virtual space, we use the direct mapping
2220  * 	memory interface if it is available.
2221  *
2222  * Arguments:
2223  * 	st		driver soft state pointer
2224  * 	length		memory size
 * 	type		AGP_NORMAL: normal agp memory, AGP_PHYSICAL: special
 *			memory type for intel i810 IGD
2227  *
2228  * Returns:
2229  * 	NULL 	Invalid memory type or can not allocate memory
2230  * 	Keytable entry pointer returned by agp_alloc_kmem or agp_alloc_pmem
2231  */
2232 static keytable_ent_t *
2233 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
2234 {
2235 
2236 	/*
2237 	 * AGP_PHYSICAL type require contiguous physical pages exported
2238 	 * to X drivers, like i810 HW cursor, ARGB cursor. the number of
2239 	 * pages needed is usuallysmall and contiguous, 4K, 16K. So we
2240 	 * use DDI interface to allocated such memory. And X use xsvc
2241 	 * drivers to map this memory into its own address space.
2242 	 */
2243 	ASSERT(st);
2244 
2245 	switch (type) {
2246 	case AGP_NORMAL:
2247 		return (agp_alloc_pmem(st, length, type));
2248 	case AGP_PHYSICAL:
2249 		return (agp_alloc_kmem(st, length));
2250 	default:
2251 		return (NULL);
2252 	}
2253 }
2254 
2255 /*
2256  * free_gart_table()
2257  *
2258  * Description:
2259  * 	This function frees the gart table memory allocated by driver.
2260  * 	Must disable gart table before calling this function.
2261  *
2262  * Arguments:
2263  * 	softstate		driver soft state pointer
2264  *
2265  */
static void
free_gart_table(agpgart_softstate_t *st)
{

	/* Nothing to do if the gart table was never allocated */
	if (st->gart_dma_handle == NULL)
		return;

	/*
	 * Tear down in the reverse order of alloc_gart_table(): unbind
	 * the DMA mapping, free the DMA memory, then free the handle.
	 */
	(void) ddi_dma_unbind_handle(st->gart_dma_handle);
	ddi_dma_mem_free(&st->gart_dma_acc_handle);
	st->gart_dma_acc_handle = NULL;
	ddi_dma_free_handle(&st->gart_dma_handle);
	st->gart_dma_handle = NULL;
	st->gart_vbase = 0;
	st->gart_size = 0;
}
2281 
2282 /*
2283  * alloc_gart_table()
2284  *
2285  * Description:
2286  * 	This function allocates one physical continuous gart table.
2287  * 	INTEL integrated video device except i810 have their special
2288  * 	video bios; No need to allocate gart table for them.
2289  *
2290  * Arguments:
2291  * 	st		driver soft state pointer
2292  *
2293  * Returns:
2294  * 	0		success
 * 	-1		can not allocate gart table
2296  */
2297 static int
2298 alloc_gart_table(agpgart_softstate_t *st)
2299 {
2300 	int			num_pages;
2301 	size_t			table_size;
2302 	int			ret = DDI_SUCCESS;
2303 	ddi_dma_cookie_t	cookie;
2304 	uint32_t		num_cookies;
2305 
2306 	num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);
2307 
2308 	/*
2309 	 * Only 40-bit maximum physical memory is supported by today's
2310 	 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
2311 	 * No one supports 64-bit gart entries now, so the size of gart
2312 	 * entries defaults to 32-bit though AGP3.0 specifies the possibility
2313 	 * of 64-bit gart entries.
2314 	 */
2315 
2316 	table_size = num_pages * (sizeof (uint32_t));
2317 
2318 	/*
2319 	 * Only AMD64 can put gart table above 4G, 40 bits at maximum
2320 	 */
2321 	if ((st->asoft_devreg.agprd_arctype == ARC_AMD64AGP) ||
2322 	    (st->asoft_devreg.agprd_arctype == ARC_AMD64NOAGP))
2323 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
2324 	else
2325 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
2326 	/* Allocate physical continuous page frame for gart table */
2327 	if (ret = ddi_dma_alloc_handle(st->asoft_dip,
2328 	    &garttable_dma_attr,
2329 	    DDI_DMA_SLEEP,
2330 	    NULL, &st->gart_dma_handle)) {
2331 		AGPDB_PRINT2((CE_WARN,
2332 		    "alloc_gart_table: ddi_dma_alloc_handle failed"));
2333 		goto err3;
2334 	}
2335 
2336 	if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
2337 	    table_size,
2338 	    &gart_dev_acc_attr,
2339 	    DDI_DMA_CONSISTENT,
2340 	    DDI_DMA_SLEEP, NULL,
2341 	    &st->gart_vbase,
2342 	    &st->gart_size,
2343 	    &st->gart_dma_acc_handle)) {
2344 		AGPDB_PRINT2((CE_WARN,
2345 		    "alloc_gart_table: ddi_dma_mem_alloc failed"));
2346 		goto err2;
2347 
2348 	}
2349 
2350 	ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
2351 	    NULL, st->gart_vbase,
2352 	    table_size,
2353 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2354 	    DDI_DMA_SLEEP, NULL,
2355 	    &cookie,  &num_cookies);
2356 
2357 	st->gart_pbase = cookie.dmac_address;
2358 
2359 	if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
2360 		if (num_cookies > 1)
2361 			(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2362 		AGPDB_PRINT2((CE_WARN,
2363 		    "alloc_gart_table: alloc contiguous phys memory failed"));
2364 		goto err1;
2365 	}
2366 
2367 	return (0);
2368 err1:
2369 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2370 	st->gart_dma_acc_handle = NULL;
2371 err2:
2372 	ddi_dma_free_handle(&st->gart_dma_handle);
2373 	st->gart_dma_handle = NULL;
2374 err3:
2375 	st->gart_pbase = 0;
2376 	st->gart_size = 0;
2377 	st->gart_vbase = 0;
2378 
2379 	return (-1);
2380 }
2381 
2382 /*
2383  * agp_add_to_gart()
2384  *
2385  * Description:
2386  * 	This function fills the gart table entries by a given page frame number
2387  * 	array and set up the agp aperture page to physical memory page
2388  * 	translation.
2389  * Arguments:
2390  * 	type		valid sytem arc types ARC_AMD64AGP, ARC_INTELAGP,
2391  * 			ARC_AMD64AGP
2392  * 	pfnarray	allocated physical page frame number array
2393  * 	pg_offset	agp aperture start page to be bound
2394  * 	entries		the number of pages to be bound
2395  * 	dma_hdl		gart table dma memory handle
2396  * 	tablep		gart table kernel virtual address
2397  * Returns:
2398  * 	-1		failed
2399  * 	0		success
2400  */
2401 static int
2402 agp_add_to_gart(
2403     agp_arc_type_t type,
2404     pfn_t *pfnarray,
2405     uint32_t pg_offset,
2406     uint32_t entries,
2407     ddi_dma_handle_t dma_hdl,
2408     uint32_t *tablep)
2409 {
2410 	int items = 0;
2411 	uint32_t *entryp;
2412 	uint32_t itemv;
2413 
2414 	entryp = tablep + pg_offset;
2415 	while (items < entries) {
2416 		if (pfn2gartentry(type, pfnarray[items], &itemv))
2417 			break;
2418 		*(entryp + items) = itemv;
2419 		items++;
2420 	}
2421 	if (items < entries)
2422 		return (-1);
2423 
2424 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
2425 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
2426 
2427 	return (0);
2428 }
2429 
2430 /*
2431  * agp_bind_key()
2432  *
2433  * Description:
2434  * 	This function will call low level gart table access functions to
2435  * 	set up gart table translation. Also it will do some sanity
2436  * 	checking on key table entry.
2437  *
2438  * Arguments:
2439  * 	softstate		driver soft state pointer
2440  * 	keyent			key table entity pointer to be bound
2441  * 	pg_offset		aperture start page to be bound
2442  * Returns:
2443  * 	EINVAL			not a valid operation
2444  */
static int
agp_bind_key(agpgart_softstate_t *softstate,
    keytable_ent_t  *keyent, uint32_t  pg_offset)
{
	uint64_t pg_end;
	int ret = 0;

	ASSERT(keyent);
	ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
	ASSERT(mutex_owned(&softstate->asoft_instmutex));

	pg_end = pg_offset + keyent->kte_pages;

	/* The binding must fit entirely inside the aperture */
	if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: key=0x%x,exceed aper range",
		    keyent->kte_key));

		return (EINVAL);
	}

	/* The requested range must not overlap an existing binding */
	if (agp_check_off(softstate->asoft_table,
	    pg_offset, keyent->kte_pages)) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
		    pg_offset, keyent->kte_pages));
		return (EINVAL);
	}

	ASSERT(keyent->kte_pfnarray != NULL);

	/* Dispatch to the gart-table update method for this hardware arc */
	switch (softstate->asoft_devreg.agprd_arctype) {
	case ARC_IGD810:
	case ARC_IGD830:
		/* Intel IGD: write entries through the device's own GTT */
		ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
		    &softstate->asoft_devreg);
		if (ret)
			return (EIO);
		break;
	case ARC_INTELAGP:
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
		/* Driver-owned gart table: fill it directly in memory */
		ret =  agp_add_to_gart(
		    softstate->asoft_devreg.agprd_arctype,
		    keyent->kte_pfnarray,
		    pg_offset,
		    keyent->kte_pages,
		    softstate->gart_dma_handle,
		    (uint32_t *)softstate->gart_vbase);
		if (ret)
			return (EINVAL);
		/* Flush GTLB table */
		lyr_flush_gart_cache(&softstate->asoft_devreg);
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: arc type = 0x%x unsupported",
		    softstate->asoft_devreg.agprd_arctype));
		return (EINVAL);
	}
	return (0);
}
2507 
/*
 * agpgart_attach()
 *
 * Standard DDI attach entry point: allocates per-instance soft state,
 * an LDI identifier, kstats, the minor node and the key table.
 * Resources are released in reverse order via the goto ladder on failure.
 */
static int
agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *softstate;

	if (cmd != DDI_ATTACH) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: only attach op supported"));
		return (DDI_FAILURE);
	}
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
	    != DDI_SUCCESS) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: soft state zalloc failed"));
		goto err1;

	}
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
	softstate->asoft_dip = dip;
	/*
	 * Allocate LDI identifier for agpgart driver
	 * Agpgart driver is the kernel consumer
	 */
	if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: LDI indentifier allcation failed"));
		goto err2;
	}

	/* Architecture is probed later, at first open */
	softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
	/* Install agp kstat */
	if (agp_init_kstats(softstate)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
		goto err3;
	}
	/*
	 * devfs will create /dev/agpgart
	 * and  /devices/agpgart:agpgart
	 */

	if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
	    AGP_INST2MINOR(instance),
	    DDI_NT_AGP_PSEUDO, 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: Can not create minor node"));
		goto err4;
	}

	/* Key table tracking all user memory allocations, freed in detach */
	softstate->asoft_table = kmem_zalloc(
	    AGP_MAXKEYS * (sizeof (keytable_ent_t)),
	    KM_SLEEP);

	return (DDI_SUCCESS);
err4:
	agp_fini_kstats(softstate);
err3:
	ldi_ident_release(softstate->asoft_li);
err2:
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
err1:
	return (DDI_FAILURE);
}
2574 
/*
 * agpgart_detach()
 *
 * Standard DDI detach entry point: reclaims any user memory still
 * registered in the key table, then tears down the resources created
 * by agpgart_attach() in reverse order.
 */
static int
agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *st;

	instance = ddi_get_instance(dip);

	st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	/*
	 * Caller should free all the memory allocated explicitly.
	 * We release the memory allocated by caller which is not
	 * properly freed. mutex_enter here make sure assertion on
	 * softstate mutex success in agp_dealloc_mem.
	 */
	mutex_enter(&st->asoft_instmutex);
	if (agp_del_allkeys(st)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
		AGPDB_PRINT2((CE_WARN,
		    "you might free agp memory exported to your applications"));

		/* Refuse to detach rather than free memory still in use */
		mutex_exit(&st->asoft_instmutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&st->asoft_instmutex);
	if (st->asoft_table) {
		kmem_free(st->asoft_table,
		    AGP_MAXKEYS * (sizeof (keytable_ent_t)));
		st->asoft_table = 0;
	}

	ddi_remove_minor_node(dip, AGPGART_DEVNODE);
	agp_fini_kstats(st);
	ldi_ident_release(st->asoft_li);
	mutex_destroy(&st->asoft_instmutex);
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);

	return (DDI_SUCCESS);
}
2618 
2619 /*ARGSUSED*/
2620 static int
2621 agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
2622     void **resultp)
2623 {
2624 	agpgart_softstate_t *st;
2625 	int instance, rval = DDI_FAILURE;
2626 	dev_t dev;
2627 
2628 	switch (cmd) {
2629 	case DDI_INFO_DEVT2DEVINFO:
2630 		dev = (dev_t)arg;
2631 		instance = AGP_DEV2INST(dev);
2632 		st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2633 		if (st != NULL) {
2634 			mutex_enter(&st->asoft_instmutex);
2635 			*resultp = st->asoft_dip;
2636 			mutex_exit(&st->asoft_instmutex);
2637 			rval = DDI_SUCCESS;
2638 		} else
2639 			*resultp = NULL;
2640 
2641 		break;
2642 	case DDI_INFO_DEVT2INSTANCE:
2643 		dev = (dev_t)arg;
2644 		instance = AGP_DEV2INST(dev);
2645 		*resultp = (void *)(uintptr_t)instance;
2646 		rval = DDI_SUCCESS;
2647 
2648 		break;
2649 	default:
2650 		break;
2651 	}
2652 
2653 	return (rval);
2654 }
2655 
2656 /*
2657  * agpgart_open()
2658  *
2659  * Description:
2660  * 	This function is the driver open entry point. If it is the
2661  * 	first time the agpgart driver is opened, the driver will
2662  * 	open other agp related layered drivers and set up the agpgart
2663  * 	table properly.
2664  *
2665  * Arguments:
2666  * 	dev			device number pointer
2667  * 	openflags		open flags
2668  *	otyp			OTYP_BLK, OTYP_CHR
2669  * 	credp			user's credential's struct pointer
2670  *
2671  * Returns:
2672  * 	ENXIO			operation error
 * 	EAGAIN			resource temporarily unavailable
2674  * 	0			success
2675  */
2676 /*ARGSUSED*/
2677 static int
2678 agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
2679 {
2680 	int instance = AGP_DEV2INST(*dev);
2681 	agpgart_softstate_t *softstate;
2682 	int rc = 0;
2683 
2684 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2685 	if (softstate == NULL) {
2686 		AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
2687 		return (ENXIO);
2688 	}
2689 	mutex_enter(&softstate->asoft_instmutex);
2690 
2691 	if (softstate->asoft_opened) {
2692 		softstate->asoft_opened++;
2693 		mutex_exit(&softstate->asoft_instmutex);
2694 		return (0);
2695 	}
2696 
2697 	/*
2698 	 * The driver is opened first time, so we initialize layered
2699 	 * driver interface and softstate member here.
2700 	 */
2701 	softstate->asoft_pgused = 0;
2702 	if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
2703 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
2704 		mutex_exit(&softstate->asoft_instmutex);
2705 		return (EAGAIN);
2706 	}
2707 
2708 	/* Call into layered driver */
2709 	if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
2710 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
2711 		lyr_end(&softstate->asoft_devreg);
2712 		mutex_exit(&softstate->asoft_instmutex);
2713 		return (EIO);
2714 	}
2715 
2716 	/*
2717 	 * BIOS already set up gtt table for ARC_IGD830
2718 	 */
2719 	if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2720 		softstate->asoft_opened++;
2721 
2722 		softstate->asoft_pgtotal =
2723 		    get_max_pages(softstate->asoft_info.agpki_apersize);
2724 
2725 		if (lyr_config_devices(&softstate->asoft_devreg)) {
2726 			AGPDB_PRINT2((CE_WARN,
2727 			    "agpgart_open: lyr_config_devices error"));
2728 			lyr_end(&softstate->asoft_devreg);
2729 			mutex_exit(&softstate->asoft_instmutex);
2730 
2731 			return (EIO);
2732 		}
2733 		mutex_exit(&softstate->asoft_instmutex);
2734 		return (0);
2735 	}
2736 
2737 	rc = alloc_gart_table(softstate);
2738 
2739 	/*
2740 	 * Allocate physically contiguous pages for AGP arc or
2741 	 * i810 arc. If failed, divide aper_size by 2 to
2742 	 * reduce gart table size until 4 megabytes. This
2743 	 * is just a workaround for systems with very few
2744 	 * physically contiguous memory.
2745 	 */
2746 	if (rc) {
2747 		while ((softstate->asoft_info.agpki_apersize >= 4) &&
2748 		    (alloc_gart_table(softstate))) {
2749 			softstate->asoft_info.agpki_apersize >>= 1;
2750 		}
2751 		if (softstate->asoft_info.agpki_apersize >= 4)
2752 			rc = 0;
2753 	}
2754 
2755 	if (rc != 0) {
2756 		AGPDB_PRINT2((CE_WARN,
2757 		    "agpgart_open: alloc gart table failed"));
2758 		lyr_end(&softstate->asoft_devreg);
2759 		mutex_exit(&softstate->asoft_instmutex);
2760 		return (EAGAIN);
2761 	}
2762 
2763 	softstate->asoft_pgtotal =
2764 	    get_max_pages(softstate->asoft_info.agpki_apersize);
2765 	/*
2766 	 * BIOS doesn't initialize GTT for i810,
2767 	 * So i810 GTT must be created by driver.
2768 	 *
2769 	 * Set up gart table and enable it.
2770 	 */
2771 	if (lyr_set_gart_addr(softstate->gart_pbase,
2772 	    &softstate->asoft_devreg)) {
2773 		AGPDB_PRINT2((CE_WARN,
2774 		    "agpgart_open: set gart table addr failed"));
2775 		free_gart_table(softstate);
2776 		lyr_end(&softstate->asoft_devreg);
2777 		mutex_exit(&softstate->asoft_instmutex);
2778 		return (EIO);
2779 	}
2780 	if (lyr_config_devices(&softstate->asoft_devreg)) {
2781 		AGPDB_PRINT2((CE_WARN,
2782 		    "agpgart_open: lyr_config_devices failed"));
2783 		free_gart_table(softstate);
2784 		lyr_end(&softstate->asoft_devreg);
2785 		mutex_exit(&softstate->asoft_instmutex);
2786 		return (EIO);
2787 	}
2788 
2789 	softstate->asoft_opened++;
2790 	mutex_exit(&softstate->asoft_instmutex);
2791 
2792 	return (0);
2793 }
2794 
2795 /*
2796  * agpgart_close()
2797  *
2798  * Description:
2799  * 	agpgart_close will release resources allocated in the first open
2800  * 	and close other open layered drivers. Also it frees the memory
2801  *	allocated by ioctls.
2802  *
2803  * Arguments:
2804  * 	dev			device number
2805  * 	flag			file status flag
2806  *	otyp			OTYP_BLK, OTYP_CHR
2807  * 	credp			user's credential's struct pointer
2808  *
2809  * Returns:
2810  * 	ENXIO			not an error, to support "deferred attach"
2811  * 	0			success
2812  */
2813 /*ARGSUSED*/
2814 static int
2815 agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
2816 {
2817 	int instance = AGP_DEV2INST(dev);
2818 	agpgart_softstate_t *softstate;
2819 
2820 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2821 	if (softstate == NULL) {
2822 		AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
2823 		return (ENXIO);
2824 	}
2825 
2826 	mutex_enter(&softstate->asoft_instmutex);
2827 	ASSERT(softstate->asoft_opened);
2828 
2829 
2830 	/*
2831 	 * If the last process close this device is not the controlling
2832 	 * process, also release the control over agpgart driver here if the
2833 	 * the controlling process fails to release the control before it
2834 	 * close the driver.
2835 	 */
2836 	if (softstate->asoft_acquired == 1) {
2837 		AGPDB_PRINT2((CE_WARN,
2838 		    "agpgart_close: auto release control over driver"));
2839 		release_control(softstate);
2840 	}
2841 
2842 	if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
2843 		AGPDB_PRINT2((CE_WARN,
2844 		    "agpgart_close: lyr_unconfig_device error"));
2845 		mutex_exit(&softstate->asoft_instmutex);
2846 		return (EIO);
2847 	}
2848 	softstate->asoft_agpen = 0;
2849 
2850 	if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2851 		free_gart_table(softstate);
2852 	}
2853 
2854 	lyr_end(&softstate->asoft_devreg);
2855 
2856 	/*
2857 	 * This statement must be positioned before agp_del_allkeys
2858 	 * agp_dealloc_mem indirectly called by agp_del_allkeys
2859 	 * will test this variable.
2860 	 */
2861 	softstate->asoft_opened = 0;
2862 
2863 	/*
2864 	 * Free the memory allocated by user applications which
2865 	 * was never deallocated.
2866 	 */
2867 	(void) agp_del_allkeys(softstate);
2868 
2869 	mutex_exit(&softstate->asoft_instmutex);
2870 
2871 	return (0);
2872 }
2873 
/*
 * ioctl_agpgart_info()
 *
 * AGPIOC_INFO handler: copies the driver/aperture information gathered
 * by copyinfo() out to the user buffer, converting to the ILP32 layout
 * for 32-bit callers on a 64-bit kernel.
 */
static int
ioctl_agpgart_info(agpgart_softstate_t  *softstate, void  *arg, int flags)
{
	agp_info_t infostruct;
#ifdef _MULTI_DATAMODEL
	agp_info32_t infostruct32;
#endif

	bzero(&infostruct, sizeof (agp_info_t));

#ifdef _MULTI_DATAMODEL
	bzero(&infostruct32, sizeof (agp_info32_t));
	/* 32-bit caller: convert the native struct before copyout */
	if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
		if (copyinfo(softstate, &infostruct))
			return (EINVAL);

		agpinfo_default_to_32(infostruct, infostruct32);
		if (ddi_copyout(&infostruct32, arg,
		    sizeof (agp_info32_t), flags) != 0)
			return (EFAULT);

		return (0);
	}
#endif /* _MULTI_DATAMODEL */
	if (copyinfo(softstate, &infostruct))
		return (EINVAL);

	if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
		return (EFAULT);
	}

	return (0);
}
2907 
2908 static int
2909 ioctl_agpgart_acquire(agpgart_softstate_t  *st)
2910 {
2911 	if (st->asoft_acquired) {
2912 		AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
2913 		return (EBUSY);
2914 	}
2915 	acquire_control(st);
2916 	return (0);
2917 }
2918 
2919 static int
2920 ioctl_agpgart_release(agpgart_softstate_t  *st)
2921 {
2922 	if (is_controlling_proc(st) < 0) {
2923 		AGPDB_PRINT2((CE_WARN,
2924 		    "ioctl_agpgart_release: not a controlling process"));
2925 		return (EPERM);
2926 	}
2927 	release_control(st);
2928 	return (0);
2929 }
2930 
2931 static int
2932 ioctl_agpgart_setup(agpgart_softstate_t  *st, void  *arg, int flags)
2933 {
2934 	agp_setup_t data;
2935 	int rc = 0;
2936 
2937 	if (is_controlling_proc(st) < 0) {
2938 		AGPDB_PRINT2((CE_WARN,
2939 		    "ioctl_agpgart_setup: not a controlling process"));
2940 		return (EPERM);
2941 	}
2942 
2943 	if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
2944 		AGPDB_PRINT2((CE_WARN,
2945 		    "ioctl_agpgart_setup: no true agp bridge"));
2946 		return (EINVAL);
2947 	}
2948 
2949 	if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
2950 		return (EFAULT);
2951 
2952 	if (rc = agp_setup(st, data.agps_mode))
2953 		return (rc);
2954 	/* Store agp mode status for kstat */
2955 	st->asoft_agpen = 1;
2956 	return (0);
2957 }
2958 
/*
 * ioctl_agpgart_alloc()
 *
 * AGPIOC_ALLOCATE handler: allocates agp memory on behalf of the
 * controlling process and returns the allocation key (and physical
 * address for AGP_PHYSICAL allocations) to userland.
 */
static int
ioctl_agpgart_alloc(agpgart_softstate_t  *st, void  *arg, int flags)
{
	agp_allocate_t	alloc_info;
	keytable_ent_t	*entryp;
	size_t		length;
	uint64_t	pg_num;

	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: not a controlling process"));
		return (EPERM);
	}

	if (ddi_copyin(arg, &alloc_info,
	    sizeof (agp_allocate_t), flags) != 0) {
		return (EFAULT);
	}
	/* Reject requests that would exceed the aperture page budget */
	pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
	if (pg_num > st->asoft_pgtotal) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: exceeding the memory pages limit"));
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: request %x pages failed",
		    alloc_info.agpa_pgcount));
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: pages used %x total is %x",
		    st->asoft_pgused, st->asoft_pgtotal));

		return (EINVAL);
	}

	length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
	entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
	if (!entryp) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
		    length));
		return (ENOMEM);
	}
	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
	alloc_info.agpa_key = entryp->kte_key;
	if (alloc_info.agpa_type == AGP_PHYSICAL) {
		/*
		 * NOTE(review): the cast truncates the physical address to
		 * 32 bits -- presumably AGP_PHYSICAL memory is always
		 * allocated below 4G (see gart_dev_acc_attr / kmem path);
		 * confirm against agp_alloc_kmem's DMA attributes.
		 */
		alloc_info.agpa_physical =
		    (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
	}
	/* Update the memory pages used */
	st->asoft_pgused += alloc_info.agpa_pgcount;

	if (ddi_copyout(&alloc_info, arg,
	    sizeof (agp_allocate_t), flags) != 0) {

		return (EFAULT);
	}

	return (0);
}
3016 
3017 static int
3018 ioctl_agpgart_dealloc(agpgart_softstate_t  *st, intptr_t arg)
3019 {
3020 	int key;
3021 	keytable_ent_t  *keyent;
3022 
3023 	if (is_controlling_proc(st) < 0) {
3024 		AGPDB_PRINT2((CE_WARN,
3025 		    "ioctl_agpgart_dealloc: not a controlling process"));
3026 		return (EPERM);
3027 	}
3028 	key = (int)arg;
3029 	if ((key >= AGP_MAXKEYS) || key < 0) {
3030 		return (EINVAL);
3031 	}
3032 	keyent = &st->asoft_table[key];
3033 	if (!keyent->kte_memhdl) {
3034 		return (EINVAL);
3035 	}
3036 
3037 	if (agp_dealloc_mem(st, keyent))
3038 		return (EINVAL);
3039 
3040 	/* Update the memory pages used */
3041 	st->asoft_pgused -= keyent->kte_pages;
3042 	bzero(keyent, sizeof (keytable_ent_t));
3043 
3044 	return (0);
3045 }
3046 
/*
 * ioctl_agpgart_bind()
 *
 * AGPIOC_BIND handler: binds a previously allocated key into the agp
 * aperture at the requested page offset.
 */
static int
ioctl_agpgart_bind(agpgart_softstate_t  *st, void  *arg, int flags)
{
	agp_bind_t 	bind_info;
	keytable_ent_t	*keyent;
	int		key;
	uint32_t	pg_offset;
	int		retval = 0;

	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_bind: not a controlling process"));
		return (EPERM);
	}

	if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
		return (EFAULT);
	}

	key = bind_info.agpb_key;
	if ((key >= AGP_MAXKEYS) || key < 0) {
		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
		return (EINVAL);
	}

	/* i830: the start of the aperture is preallocated by the BIOS */
	if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
		if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
		    st->asoft_info.agpki_presize) {
			AGPDB_PRINT2((CE_WARN,
			    "ioctl_agpgart_bind: bind to prealloc area "
			    "pgstart = %dKB < presize = %ldKB",
			    AGP_PAGES2KB(bind_info.agpb_pgstart),
			    st->asoft_info.agpki_presize));
			return (EINVAL);
		}
	}

	pg_offset = bind_info.agpb_pgstart;
	keyent = &st->asoft_table[key];
	if (!keyent->kte_memhdl) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_bind: Key = 0x%x can't get keyenty",
		    key));
		return (EINVAL);
	}

	/* Double-binding a key is not allowed */
	if (keyent->kte_bound != 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_bind: Key = 0x%x already bound",
		    key));
		return (EINVAL);
	}
	retval = agp_bind_key(st, keyent, pg_offset);

	/* Record the binding only on success */
	if (retval == 0) {
		keyent->kte_pgoff = pg_offset;
		keyent->kte_bound = 1;
	}

	return (retval);
}
3108 
3109 static int
3110 ioctl_agpgart_unbind(agpgart_softstate_t  *st, void  *arg, int flags)
3111 {
3112 	int key, retval = 0;
3113 	agp_unbind_t unbindinfo;
3114 	keytable_ent_t *keyent;
3115 
3116 	if (is_controlling_proc(st) < 0) {
3117 		AGPDB_PRINT2((CE_WARN,
3118 		    "ioctl_agpgart_bind: not a controlling process"));
3119 		return (EPERM);
3120 	}
3121 
3122 	if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
3123 		return (EFAULT);
3124 	}
3125 	key = unbindinfo.agpu_key;
3126 	if ((key >= AGP_MAXKEYS) || key < 0) {
3127 		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
3128 		return (EINVAL);
3129 	}
3130 	keyent = &st->asoft_table[key];
3131 	if (!keyent->kte_bound) {
3132 		return (EINVAL);
3133 	}
3134 
3135 	if ((retval = agp_unbind_key(st, keyent)) != 0)
3136 		return (retval);
3137 
3138 	return (0);
3139 }
3140 
/*ARGSUSED*/
static int
agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
    cred_t *credp, int *rvalp)
{
	int instance;
	int retval = 0;
	void *arg = (void*)intarg;

	agpgart_softstate_t *softstate;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
		return (ENXIO);
	}

	/* Everything but the read-only INFO ioctl needs gart privilege */
	if ((cmd != AGPIOC_INFO) && secpolicy_gart_access(credp)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: permission denied"));
		return (EPERM);
	}

	/* All ioctl handlers run under the per-instance mutex */
	mutex_enter(&softstate->asoft_instmutex);

	switch (cmd) {
	case AGPIOC_INFO:
		retval = ioctl_agpgart_info(softstate, arg, flags);
		break;
	case AGPIOC_ACQUIRE:
		retval = ioctl_agpgart_acquire(softstate);
		break;
	case AGPIOC_RELEASE:
		retval = ioctl_agpgart_release(softstate);
		break;
	case AGPIOC_SETUP:
		retval = ioctl_agpgart_setup(softstate, arg, flags);
		break;
	case AGPIOC_ALLOCATE:
		retval = ioctl_agpgart_alloc(softstate, arg, flags);
		break;
	case AGPIOC_DEALLOCATE:
		/* DEALLOCATE passes the key by value, not by pointer */
		retval = ioctl_agpgart_dealloc(softstate, intarg);
		break;
	case AGPIOC_BIND:
		retval = ioctl_agpgart_bind(softstate, arg, flags);
		break;
	case AGPIOC_UNBIND:
		retval = ioctl_agpgart_unbind(softstate, arg, flags);
		break;
	default:
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument"));
		retval = ENXIO;
		break;
	}

	mutex_exit(&softstate->asoft_instmutex);
	return (retval);
}
3200 
/*
 * agpgart_segmap()
 *
 * Driver segmap entry point: validates alignment and privilege, then
 * defers to devmap_setup(), which in turn calls agpgart_devmap() to
 * resolve the actual mapping.
 */
static int
agpgart_segmap(dev_t dev, off_t off, struct as *asp,
    caddr_t *addrp, off_t len, unsigned int prot,
    unsigned int maxprot, unsigned int flags, cred_t *credp)
{

	struct agpgart_softstate *softstate;
	int instance;
	int rc = 0;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
		return (ENXIO);
	}
	/* Mappings must be agp-page aligned */
	if (!AGP_ALIGNED(len))
		return (EINVAL);

	mutex_enter(&softstate->asoft_instmutex);

	/*
	 * Process must have gart map privilege or gart access privilege
	 * to map agp memory.
	 */
	if (secpolicy_gart_map(credp)) {
		mutex_exit(&softstate->asoft_instmutex);
		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: permission denied"));
		return (EPERM);
	}

	rc = devmap_setup(dev, (offset_t)off, asp, addrp,
	    (size_t)len, prot, maxprot, flags, credp);

	mutex_exit(&softstate->asoft_instmutex);
	return (rc);
}
3238 
/*ARGSUSED*/
static int
agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
    size_t *mappedlen, uint_t model)
{
	struct agpgart_softstate *softstate;
	int instance, status;
	struct keytable_ent *mementry;
	offset_t local_offset;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
		return (ENXIO);
	}


	/*
	 * The mapping offset must lie within the aperture.
	 * NOTE(review): the check uses '>' rather than '>=', so an offset
	 * exactly at the aperture end is accepted here -- presumably it is
	 * then rejected by the keyent lookup below; confirm.
	 */
	if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
		return (EINVAL);
	}

	/*
	 * Can not find any memory now, so fail.
	 */

	mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));

	if (mementry == NULL) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: can not find the proper keyent"));
		return (EINVAL);
	}

	/* Translate the aperture offset into an offset within this entry */
	local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);

	/* Clamp the mapping length to the end of this entry's pages */
	if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
		len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
	}

	switch (mementry->kte_type) {
	case AGP_NORMAL:
		/* Map the pmem-backed pages write-combined into userland */
		status = devmap_pmem_setup(cookie, softstate->asoft_dip,
		    &agp_devmap_cb,
		    PMEMP(mementry->kte_memhdl)->pmem_cookie, local_offset,
		    len, PROT_ALL, (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
		    &mem_dev_acc_attr);
		break;
	default:
		/* AGP_PHYSICAL memory is mapped via the xsvc driver instead */
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: not a valid memory type"));
		return (EINVAL);
	}


	if (status == 0) {
		*mappedlen = len;
	} else {
		*mappedlen = 0;
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: devmap interface failed"));
		return (EINVAL);
	}

	return (0);
}
3306 
/*
 * Character device entry points (cb_ops(9S)).  agpgart exposes its
 * functionality through open/close, ioctl and memory mapping
 * (devmap/segmap); read, write and the other classic entry points are
 * unsupported (nodev).  Fields are positional and must stay in
 * struct cb_ops declaration order.
 */
static struct cb_ops	agpgart_cb_ops = {
	agpgart_open,		/* cb_open */
	agpgart_close,		/* cb_close */
	nodev,			/* cb_strategy: unsupported */
	nodev,			/* cb_print: unsupported */
	nodev,			/* cb_dump: unsupported */
	nodev,			/* cb_read: unsupported */
	nodev,			/* cb_write: unsupported */
	agpgart_ioctl,		/* cb_ioctl */
	agpgart_devmap,		/* cb_devmap */
	nodev,			/* cb_mmap: superseded by devmap */
	agpgart_segmap,		/* cb_segmap */
	nochpoll,		/* cb_chpoll: no poll support */
	ddi_prop_op,		/* cb_prop_op: default prop handling */
	0,			/* cb_str: not a STREAMS driver */
	D_DEVMAP | D_MP,	/* cb_flag: devmap-capable, MT-safe */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread: unsupported */
	nodev,			/* cb_awrite: unsupported */
};
3327 
/*
 * Device operations (dev_ops(9S)) wiring the driver's autoconfiguration
 * entry points and the cb_ops table above into the DDI framework.
 */
static struct dev_ops agpgart_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	agpgart_getinfo,	/* devo_getinfo */
	nulldev,		/* devo_identify: obsolete, always succeed */
	nulldev,		/* devo_probe: no probe needed */
	agpgart_attach,		/* devo_attach */
	agpgart_detach,		/* devo_detach */
	nodev,			/* devo_reset: unsupported */
	&agpgart_cb_ops,	/* devo_cb_ops */
	(struct bus_ops *)0,	/* devo_bus_ops: not a nexus driver */
	NULL,			/* devo_power: no power management */
};
3341 
/*
 * Loadable-module linkage for a device driver (modldrv(9S)).  The
 * version string's %I% is an SCCS keyword expanded at build time.
 */
static	struct modldrv modldrv = {
	&mod_driverops,
	"AGP driver v%I%",
	&agpgart_ops,
};
3347 
/*
 * Module linkage (modlinkage(9S)): a single driver linkage structure,
 * NULL-terminated, handed to mod_install()/mod_remove() below.
 */
static struct modlinkage modlinkage = {
	MODREV_1,		/* MODREV_1 is indicated by manual */
	{&modldrv, NULL, NULL, NULL}
};
3352 
/* agpgart_glob_soft_handle (soft-state anchor) is defined near the top
 * of this file; a redundant duplicate tentative definition that used to
 * sit here has been removed. */
3354 
3355 int
3356 _init(void)
3357 {
3358 	int ret = DDI_SUCCESS;
3359 
3360 	ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
3361 	    sizeof (agpgart_softstate_t),
3362 	    AGPGART_MAX_INSTANCES);
3363 
3364 	if (ret != 0) {
3365 		AGPDB_PRINT2((CE_WARN,
3366 		    "_init: soft state init error code=0x%x", ret));
3367 		return (ret);
3368 	}
3369 
3370 	if ((ret = mod_install(&modlinkage)) != 0) {
3371 		AGPDB_PRINT2((CE_WARN,
3372 		    "_init: mod install error code=0x%x", ret));
3373 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3374 		return (ret);
3375 	}
3376 
3377 	return (DDI_SUCCESS);
3378 }
3379 
/*
 * _info()
 *
 * Loadable-module entry point: report module information through
 * mod_info(9F) using the linkage structure above.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3385 
3386 int
3387 _fini(void)
3388 {
3389 	int ret;
3390 
3391 	if ((ret = mod_remove(&modlinkage)) == 0) {
3392 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3393 	}
3394 
3395 	return (ret);
3396 }
3397